From 3159e6bdcc815501147db1203f6472c38fbda177 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 22 Feb 2024 16:21:21 +0000 Subject: [PATCH 001/533] chore: update dependency @types/ws to v8.5.10 (#683) --- ecosystem-tests/node-ts-cjs-auto/package-lock.json | 6 +++--- ecosystem-tests/node-ts-cjs/package-lock.json | 6 +++--- ecosystem-tests/node-ts4.5-jest27/package-lock.json | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ecosystem-tests/node-ts-cjs-auto/package-lock.json b/ecosystem-tests/node-ts-cjs-auto/package-lock.json index 56cf77290..df381f1b5 100644 --- a/ecosystem-tests/node-ts-cjs-auto/package-lock.json +++ b/ecosystem-tests/node-ts-cjs-auto/package-lock.json @@ -1121,9 +1121,9 @@ "dev": true }, "node_modules/@types/ws": { - "version": "8.5.5", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz", - "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==", + "version": "8.5.10", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz", + "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", "dev": true, "dependencies": { "@types/node": "*" diff --git a/ecosystem-tests/node-ts-cjs/package-lock.json b/ecosystem-tests/node-ts-cjs/package-lock.json index f770cacac..c39fc8f1c 100644 --- a/ecosystem-tests/node-ts-cjs/package-lock.json +++ b/ecosystem-tests/node-ts-cjs/package-lock.json @@ -1163,9 +1163,9 @@ "dev": true }, "node_modules/@types/ws": { - "version": "8.5.5", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz", - "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==", + "version": "8.5.10", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz", + "integrity": 
"sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", "dev": true, "dependencies": { "@types/node": "*" diff --git a/ecosystem-tests/node-ts4.5-jest27/package-lock.json b/ecosystem-tests/node-ts4.5-jest27/package-lock.json index 682b0f7a6..f46e12de9 100644 --- a/ecosystem-tests/node-ts4.5-jest27/package-lock.json +++ b/ecosystem-tests/node-ts4.5-jest27/package-lock.json @@ -1096,9 +1096,9 @@ "dev": true }, "node_modules/@types/ws": { - "version": "8.5.5", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz", - "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==", + "version": "8.5.10", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz", + "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==", "dev": true, "dependencies": { "@types/node": "*" From 684f139c0d913e64db171fe5877c7c5980e29813 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:59:26 -0500 Subject: [PATCH 002/533] chore(ci): update actions/setup-node action to v4 (#685) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b342025cc..0f699bc95 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v3 - name: Set up Node - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: '18' From 90a733e6ff714e6fef3c71ae36e18eee6787a666 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 22 Feb 2024 15:16:53 -0500 Subject: [PATCH 003/533] chore(types): extract run status to a named type (#686) --- .github/workflows/ci.yml | 2 +- api.md | 1 + src/resources/beta/threads/index.ts | 1 + 
src/resources/beta/threads/runs/index.ts | 1 + src/resources/beta/threads/runs/runs.ts | 26 ++++++++++++++++-------- src/resources/beta/threads/threads.ts | 1 + 6 files changed, 22 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0f699bc95..f51c7a308 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: if: github.repository == 'openai/openai-node' steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Node uses: actions/setup-node@v4 diff --git a/api.md b/api.md index 68d8545cc..ff3180cba 100644 --- a/api.md +++ b/api.md @@ -221,6 +221,7 @@ Types: - RequiredActionFunctionToolCall - Run +- RunStatus Methods: diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 53e26a5c6..54a02dd03 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -14,6 +14,7 @@ export { export { RequiredActionFunctionToolCall, Run, + RunStatus, RunCreateParams, RunUpdateParams, RunListParams, diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index a2261f961..b11736c5c 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -14,6 +14,7 @@ export { export { RequiredActionFunctionToolCall, Run, + RunStatus, RunCreateParams, RunUpdateParams, RunListParams, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 749d2c7f6..9582a060b 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -242,15 +242,7 @@ export interface Run { * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or * `expired`. 
*/ - status: - | 'queued' - | 'in_progress' - | 'requires_action' - | 'cancelling' - | 'cancelled' - | 'failed' - | 'completed' - | 'expired'; + status: RunStatus; /** * The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) @@ -361,6 +353,21 @@ export namespace Run { } } +/** + * The status of the run, which can be either `queued`, `in_progress`, + * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or + * `expired`. + */ +export type RunStatus = + | 'queued' + | 'in_progress' + | 'requires_action' + | 'cancelling' + | 'cancelled' + | 'failed' + | 'completed' + | 'expired'; + export interface RunCreateParams { /** * The ID of the @@ -486,6 +493,7 @@ export namespace RunSubmitToolOutputsParams { export namespace Runs { export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; export import Run = RunsAPI.Run; + export import RunStatus = RunsAPI.RunStatus; export import RunsPage = RunsAPI.RunsPage; export import RunCreateParams = RunsAPI.RunCreateParams; export import RunUpdateParams = RunsAPI.RunUpdateParams; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 8bbe1804f..5aa1f8c25 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -298,6 +298,7 @@ export namespace Threads { export import Runs = RunsAPI.Runs; export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; export import Run = RunsAPI.Run; + export import RunStatus = RunsAPI.RunStatus; export import RunsPage = RunsAPI.RunsPage; export import RunCreateParams = RunsAPI.RunCreateParams; export import RunUpdateParams = RunsAPI.RunUpdateParams; From 0ae349cf18f307a3e901149de8411caaa370fb95 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 23 Feb 2024 06:35:12 -0500 Subject: [PATCH 004/533] chore: update @types/react to 18.2.58, @types/react-dom to 18.2.19 (#688) --- 
ecosystem-tests/vercel-edge/package-lock.json | 16 ++++++++-------- ecosystem-tests/vercel-edge/package.json | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index 6b44e0774..d1c67b718 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -15,8 +15,8 @@ }, "devDependencies": { "@types/node": "20.3.3", - "@types/react": "18.2.13", - "@types/react-dom": "18.2.6", + "@types/react": "18.2.58", + "@types/react-dom": "18.2.19", "edge-runtime": "^2.4.3", "fastest-levenshtein": "^1.0.16", "jest": "^29.5.0", @@ -1562,9 +1562,9 @@ "dev": true }, "node_modules/@types/react": { - "version": "18.2.13", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.13.tgz", - "integrity": "sha512-vJ+zElvi/Zn9cVXB5slX2xL8PZodPCwPRDpittQdw43JR2AJ5k3vKdgJJyneV/cYgIbLQUwXa9JVDvUZXGba+Q==", + "version": "18.2.58", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.58.tgz", + "integrity": "sha512-TaGvMNhxvG2Q0K0aYxiKfNDS5m5ZsoIBBbtfUorxdH4NGSXIlYvZxLJI+9Dd3KjeB3780bciLyAb7ylO8pLhPw==", "dev": true, "dependencies": { "@types/prop-types": "*", @@ -1573,9 +1573,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.2.6", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.6.tgz", - "integrity": "sha512-2et4PDvg6PVCyS7fuTc4gPoksV58bW0RwSxWKcPRcHZf0PRUGq03TKcD/rUHe3azfV6/5/biUBJw+HhCQjaP0A==", + "version": "18.2.19", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz", + "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==", "dev": true, "dependencies": { "@types/react": "*" diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 9ebff4bbc..506a9d08c 100644 --- 
a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -21,8 +21,8 @@ }, "devDependencies": { "@types/node": "20.3.3", - "@types/react": "18.2.13", - "@types/react-dom": "18.2.6", + "@types/react": "18.2.58", + "@types/react-dom": "18.2.19", "edge-runtime": "^2.4.3", "fastest-levenshtein": "^1.0.16", "jest": "^29.5.0", From 5601376ef6cee6c456d94983dbc6745e2cf2ce08 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 23 Feb 2024 09:51:32 -0500 Subject: [PATCH 005/533] chore: update dependency next to v13.5.6 (#689) --- ecosystem-tests/vercel-edge/package-lock.json | 127 ++++++++---------- ecosystem-tests/vercel-edge/package.json | 2 +- 2 files changed, 60 insertions(+), 69 deletions(-) diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index d1c67b718..ebac7eb81 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "13.4.6", + "next": "13.5.6", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -1171,14 +1171,14 @@ } }, "node_modules/@next/env": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/env/-/env-13.4.6.tgz", - "integrity": "sha512-nqUxEtvDqFhmV1/awSg0K2XHNwkftNaiUqCYO9e6+MYmqNObpKVl7OgMkGaQ2SZnFx5YqF0t60ZJTlyJIDAijg==" + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/env/-/env-13.5.6.tgz", + "integrity": "sha512-Yac/bV5sBGkkEXmAX5FWPS9Mmo2rthrOPRQQNfycJPkjUAUclomCPH7QFVCDQ4Mp2k2K1SSM6m0zrxYrOwtFQw==" }, "node_modules/@next/swc-darwin-arm64": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.4.6.tgz", - "integrity": "sha512-ahi6VP98o4HV19rkOXPSUu+ovfHfUxbJQ7VVJ7gL2FnZRr7onEFC1oGQ6NQHpm8CxpIzSSBW79kumlFMOmZVjg==", + "version": "13.5.6", + "resolved": 
"https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.5.6.tgz", + "integrity": "sha512-5nvXMzKtZfvcu4BhtV0KH1oGv4XEW+B+jOfmBdpFI3C7FrB/MfujRpWYSBBO64+qbW8pkZiSyQv9eiwnn5VIQA==", "cpu": [ "arm64" ], @@ -1191,9 +1191,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.6.tgz", - "integrity": "sha512-13cXxKFsPJIJKzUqrU5XB1mc0xbUgYsRcdH6/rB8c4NMEbWGdtD4QoK9ShN31TZdePpD4k416Ur7p+deMIxnnA==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.5.6.tgz", + "integrity": "sha512-6cgBfxg98oOCSr4BckWjLLgiVwlL3vlLj8hXg2b+nDgm4bC/qVXXLfpLB9FHdoDu4057hzywbxKvmYGmi7yUzA==", "cpu": [ "x64" ], @@ -1206,9 +1206,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.6.tgz", - "integrity": "sha512-Ti+NMHEjTNktCVxNjeWbYgmZvA2AqMMI2AMlzkXsU7W4pXCMhrryAmAIoo+7YdJbsx01JQWYVxGe62G6DoCLaA==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.5.6.tgz", + "integrity": "sha512-txagBbj1e1w47YQjcKgSU4rRVQ7uF29YpnlHV5xuVUsgCUf2FmyfJ3CPjZUvpIeXCJAoMCFAoGnbtX86BK7+sg==", "cpu": [ "arm64" ], @@ -1221,9 +1221,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.6.tgz", - "integrity": "sha512-OHoC6gO7XfjstgwR+z6UHKlvhqJfyMtNaJidjx3sEcfaDwS7R2lqR5AABi8PuilGgi0BO0O0sCXqLlpp3a0emQ==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.5.6.tgz", + "integrity": "sha512-cGd+H8amifT86ZldVJtAKDxUqeFyLWW+v2NlBULnLAdWsiuuN8TuhVBt8ZNpCqcAuoruoSWynvMWixTFcroq+Q==", "cpu": [ "arm64" ], @@ -1236,9 +1236,9 @@ } }, 
"node_modules/@next/swc-linux-x64-gnu": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.6.tgz", - "integrity": "sha512-zHZxPGkUlpfNJCboUrFqwlwEX5vI9LSN70b8XEb0DYzzlrZyCyOi7hwDp/+3Urm9AB7YCAJkgR5Sp1XBVjHdfQ==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.5.6.tgz", + "integrity": "sha512-Mc2b4xiIWKXIhBy2NBTwOxGD3nHLmq4keFk+d4/WL5fMsB8XdJRdtUlL87SqVCTSaf1BRuQQf1HvXZcy+rq3Nw==", "cpu": [ "x64" ], @@ -1251,9 +1251,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.6.tgz", - "integrity": "sha512-K/Y8lYGTwTpv5ME8PSJxwxLolaDRdVy+lOd9yMRMiQE0BLUhtxtCWC9ypV42uh9WpLjoaD0joOsB9Q6mbrSGJg==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.5.6.tgz", + "integrity": "sha512-CFHvP9Qz98NruJiUnCe61O6GveKKHpJLloXbDSWRhqhkJdZD2zU5hG+gtVJR//tyW897izuHpM6Gtf6+sNgJPQ==", "cpu": [ "x64" ], @@ -1266,9 +1266,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.6.tgz", - "integrity": "sha512-U6LtxEUrjBL2tpW+Kr1nHCSJWNeIed7U7l5o7FiKGGwGgIlFi4UHDiLI6TQ2lxi20fAU33CsruV3U0GuzMlXIw==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.5.6.tgz", + "integrity": "sha512-aFv1ejfkbS7PUa1qVPwzDHjQWQtknzAZWGTKYIAaS4NMtBlk3VyA6AYn593pqNanlicewqyl2jUhQAaFV/qXsg==", "cpu": [ "arm64" ], @@ -1281,9 +1281,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.6.tgz", - "integrity": 
"sha512-eEBeAqpCfhdPSlCZCayjCiyIllVqy4tcqvm1xmg3BgJG0G5ITiMM4Cw2WVeRSgWDJqQGRyyb+q8Y2ltzhXOWsQ==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.5.6.tgz", + "integrity": "sha512-XqqpHgEIlBHvzwG8sp/JXMFkLAfGLqkbVsyN+/Ih1mR8INb6YCc2x/Mbwi6hsAgUnqQztz8cvEbHJUbSl7RHDg==", "cpu": [ "ia32" ], @@ -1296,9 +1296,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.6.tgz", - "integrity": "sha512-OrZs94AuO3ZS5tnqlyPRNgfWvboXaDQCi5aXGve3o3C+Sj0ctMUV9+Do+0zMvvLRumR8E0PTWKvtz9n5vzIsWw==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.5.6.tgz", + "integrity": "sha512-Cqfe1YmOS7k+5mGu92nl5ULkzpKuxJrP3+4AEuPmrpFZ3BHxTY3TnHmU1On3bFmFFs6FbTcdF58CCUProGpIGQ==", "cpu": [ "x64" ], @@ -1410,9 +1410,9 @@ } }, "node_modules/@swc/helpers": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.1.tgz", - "integrity": "sha512-sJ902EfIzn1Fa+qYmjdQqh8tPsoxyBz+8yBKC2HKUxyezKJFwPGOn7pv4WY6QuQW//ySQi5lJjA/ZT9sNWWNTg==", + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", + "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", "dependencies": { "tslib": "^2.4.0" } @@ -5064,39 +5064,37 @@ "dev": true }, "node_modules/next": { - "version": "13.4.6", - "resolved": "https://registry.npmjs.org/next/-/next-13.4.6.tgz", - "integrity": "sha512-sjVqjxU+U2aXZnYt4Ud6CTLNNwWjdSfMgemGpIQJcN3Z7Jni9xRWbR0ie5fQzCg87aLqQVhKA2ud2gPoqJ9lGw==", + "version": "13.5.6", + "resolved": "https://registry.npmjs.org/next/-/next-13.5.6.tgz", + "integrity": "sha512-Y2wTcTbO4WwEsVb4A8VSnOsG1I9ok+h74q0ZdxkwM3EODqrs4pasq7O0iUxbcS9VtWMicG7f3+HAj0r1+NtKSw==", "dependencies": { - 
"@next/env": "13.4.6", - "@swc/helpers": "0.5.1", + "@next/env": "13.5.6", + "@swc/helpers": "0.5.2", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001406", - "postcss": "8.4.14", + "postcss": "8.4.31", "styled-jsx": "5.1.1", - "watchpack": "2.4.0", - "zod": "3.21.4" + "watchpack": "2.4.0" }, "bin": { "next": "dist/bin/next" }, "engines": { - "node": ">=16.8.0" + "node": ">=16.14.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "13.4.6", - "@next/swc-darwin-x64": "13.4.6", - "@next/swc-linux-arm64-gnu": "13.4.6", - "@next/swc-linux-arm64-musl": "13.4.6", - "@next/swc-linux-x64-gnu": "13.4.6", - "@next/swc-linux-x64-musl": "13.4.6", - "@next/swc-win32-arm64-msvc": "13.4.6", - "@next/swc-win32-ia32-msvc": "13.4.6", - "@next/swc-win32-x64-msvc": "13.4.6" + "@next/swc-darwin-arm64": "13.5.6", + "@next/swc-darwin-x64": "13.5.6", + "@next/swc-linux-arm64-gnu": "13.5.6", + "@next/swc-linux-arm64-musl": "13.5.6", + "@next/swc-linux-x64-gnu": "13.5.6", + "@next/swc-linux-x64-musl": "13.5.6", + "@next/swc-win32-arm64-msvc": "13.5.6", + "@next/swc-win32-ia32-msvc": "13.5.6", + "@next/swc-win32-x64-msvc": "13.5.6" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", - "fibers": ">= 3.1.0", "react": "^18.2.0", "react-dom": "^18.2.0", "sass": "^1.3.0" @@ -5105,9 +5103,6 @@ "@opentelemetry/api": { "optional": true }, - "fibers": { - "optional": true - }, "sass": { "optional": true } @@ -5419,9 +5414,9 @@ } }, "node_modules/postcss": { - "version": "8.4.14", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.14.tgz", - "integrity": "sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", @@ -5430,10 +5425,14 @@ { "type": "tidelift", "url": 
"https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, @@ -6715,14 +6714,6 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } - }, - "node_modules/zod": { - "version": "3.21.4", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.21.4.tgz", - "integrity": "sha512-m46AKbrzKVzOzs/DZgVnG5H55N1sv1M8qZU3A8RIKbs3mrACDNeIOeilDymVb2HdmP8uwshOCF4uJ8uM9rCqJw==", - "funding": { - "url": "https://github.com/sponsors/colinhacks" - } } } } diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 506a9d08c..171ba9c1a 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "13.4.6", + "next": "13.5.6", "react": "18.2.0", "react-dom": "18.2.0" }, From e84773e3813a2f71cd542c3b04eb9842d99eb0ca Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 23 Feb 2024 10:31:41 -0500 Subject: [PATCH 006/533] chore: update dependency @types/node to v20.11.20 (#690) --- .../node-ts-cjs-auto/package-lock.json | 17 +++++++++++++---- ecosystem-tests/node-ts-cjs/package-lock.json | 17 +++++++++++++---- .../node-ts-esm-auto/package-lock.json | 17 +++++++++++++---- .../node-ts-esm-web/package-lock.json | 17 +++++++++++++---- ecosystem-tests/node-ts-esm/package-lock.json | 17 +++++++++++++---- .../node-ts4.5-jest27/package-lock.json | 17 +++++++++++++---- 6 files changed, 78 insertions(+), 24 deletions(-) diff --git a/ecosystem-tests/node-ts-cjs-auto/package-lock.json b/ecosystem-tests/node-ts-cjs-auto/package-lock.json index df381f1b5..a11f9814d 100644 --- a/ecosystem-tests/node-ts-cjs-auto/package-lock.json +++ 
b/ecosystem-tests/node-ts-cjs-auto/package-lock.json @@ -1093,10 +1093,13 @@ } }, "node_modules/@types/node": { - "version": "20.5.7", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz", - "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==", - "dev": true + "version": "20.11.20", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", + "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/node-fetch": { "version": "2.6.4", @@ -3684,6 +3687,12 @@ "node": ">=4.2.0" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", diff --git a/ecosystem-tests/node-ts-cjs/package-lock.json b/ecosystem-tests/node-ts-cjs/package-lock.json index c39fc8f1c..c5280c5b5 100644 --- a/ecosystem-tests/node-ts-cjs/package-lock.json +++ b/ecosystem-tests/node-ts-cjs/package-lock.json @@ -1135,10 +1135,13 @@ } }, "node_modules/@types/node": { - "version": "20.5.7", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz", - "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==", - "dev": true + "version": "20.11.20", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", + "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "dev": true, + "dependencies": { + 
"undici-types": "~5.26.4" + } }, "node_modules/@types/node-fetch": { "version": "2.6.4", @@ -4244,6 +4247,12 @@ "node": ">=4.2.0" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/universalify": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", diff --git a/ecosystem-tests/node-ts-esm-auto/package-lock.json b/ecosystem-tests/node-ts-esm-auto/package-lock.json index 1123560d4..4bce04f80 100644 --- a/ecosystem-tests/node-ts-esm-auto/package-lock.json +++ b/ecosystem-tests/node-ts-esm-auto/package-lock.json @@ -1157,10 +1157,13 @@ } }, "node_modules/@types/node": { - "version": "20.5.7", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz", - "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==", - "dev": true + "version": "20.11.20", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", + "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/stack-utils": { "version": "2.0.3", @@ -3812,6 +3815,12 @@ "node": ">=4.2.0" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", diff --git 
a/ecosystem-tests/node-ts-esm-web/package-lock.json b/ecosystem-tests/node-ts-esm-web/package-lock.json index a2b14d348..b96128a4e 100644 --- a/ecosystem-tests/node-ts-esm-web/package-lock.json +++ b/ecosystem-tests/node-ts-esm-web/package-lock.json @@ -1157,10 +1157,13 @@ } }, "node_modules/@types/node": { - "version": "20.5.7", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz", - "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==", - "dev": true + "version": "20.11.20", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", + "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/stack-utils": { "version": "2.0.3", @@ -3812,6 +3815,12 @@ "node": ">=4.2.0" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", diff --git a/ecosystem-tests/node-ts-esm/package-lock.json b/ecosystem-tests/node-ts-esm/package-lock.json index 480a700fe..4aecff6ca 100644 --- a/ecosystem-tests/node-ts-esm/package-lock.json +++ b/ecosystem-tests/node-ts-esm/package-lock.json @@ -1157,10 +1157,13 @@ } }, "node_modules/@types/node": { - "version": "20.5.7", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz", - "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==", - "dev": true + "version": "20.11.20", + "resolved": 
"https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", + "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/stack-utils": { "version": "2.0.3", @@ -3812,6 +3815,12 @@ "node": ">=4.2.0" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", diff --git a/ecosystem-tests/node-ts4.5-jest27/package-lock.json b/ecosystem-tests/node-ts4.5-jest27/package-lock.json index f46e12de9..76813597f 100644 --- a/ecosystem-tests/node-ts4.5-jest27/package-lock.json +++ b/ecosystem-tests/node-ts4.5-jest27/package-lock.json @@ -1068,10 +1068,13 @@ } }, "node_modules/@types/node": { - "version": "20.6.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.6.0.tgz", - "integrity": "sha512-najjVq5KN2vsH2U/xyh2opaSEz6cZMR2SetLIlxlj08nOcmPOemJmUK2o4kUzfLqfrWE0PIrNeE16XhYDd3nqg==", - "dev": true + "version": "20.11.20", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", + "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "dev": true, + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/node-fetch": { "version": "2.6.5", @@ -4117,6 +4120,12 @@ "node": ">=4.2.0" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": 
"sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true + }, "node_modules/universalify": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", From 0372eaaf6f33bfbc3cb6294a2fd6b3bab9e7ba80 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 23 Feb 2024 19:18:24 -0500 Subject: [PATCH 007/533] feat(api): add wav and pcm to response_format (#691) --- src/resources/audio/speech.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index faa281686..d5ef09118 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -35,9 +35,13 @@ export interface SpeechCreateParams { voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; /** - * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. + * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, + * `flac`, `pcm`, and `wav`. + * + * The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz + * sample rate, mono channel, and 16-bit depth in signed little-endian format. */ - response_format?: 'mp3' | 'opus' | 'aac' | 'flac'; + response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'pcm' | 'wav'; /** * The speed of the generated audio. Select a value from `0.25` to `4.0`. 
`1.0` is From 5961cb8cea11065efd1ffee9db14f19ad7054ad5 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Sat, 24 Feb 2024 16:17:31 +0100 Subject: [PATCH 008/533] chore(internal): fix ecosystem tests (#693) --- .../node-ts-cjs-auto/moduleResolution/node/type-tests.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/ecosystem-tests/node-ts-cjs-auto/moduleResolution/node/type-tests.ts b/ecosystem-tests/node-ts-cjs-auto/moduleResolution/node/type-tests.ts index a3c4f383b..2621b2b47 100644 --- a/ecosystem-tests/node-ts-cjs-auto/moduleResolution/node/type-tests.ts +++ b/ecosystem-tests/node-ts-cjs-auto/moduleResolution/node/type-tests.ts @@ -9,6 +9,5 @@ async function typeTests() { model: 'whisper-1', }) .asResponse(); - // @ts-expect-error this doesn't work with "moduleResolution": "node" response.body; } From 6175eca426b15990be5e5cdb0e8497e547f87d8a Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 28 Feb 2024 06:06:00 +0100 Subject: [PATCH 009/533] release: 4.28.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 24 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8d5375100..5934251e9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.28.3" + ".": "4.28.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 274b8e8a5..68ebe3767 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.28.4 (2024-02-28) + +Full Changelog: [v4.28.3...v4.28.4](https://github.com/openai/openai-node/compare/v4.28.3...v4.28.4) + +### Features + +* **api:** add wav and pcm to response_format ([#691](https://github.com/openai/openai-node/issues/691)) 
([b1c6171](https://github.com/openai/openai-node/commit/b1c61711961a62a4d7b47909a68ecd65231a66af)) + + +### Chores + +* **ci:** update actions/setup-node action to v4 ([#685](https://github.com/openai/openai-node/issues/685)) ([f2704d5](https://github.com/openai/openai-node/commit/f2704d5f1580c0f1d31584ef88702cde8f6804d4)) +* **internal:** fix ecosystem tests ([#693](https://github.com/openai/openai-node/issues/693)) ([616624d](https://github.com/openai/openai-node/commit/616624d3d9fd10ce254ce0d435b2b73ed11679f2)) +* **types:** extract run status to a named type ([#686](https://github.com/openai/openai-node/issues/686)) ([b3b3b8e](https://github.com/openai/openai-node/commit/b3b3b8ea20e0f311d3bd53dfd22ccc04f5dce5f7)) +* update @types/react to 18.2.58, @types/react-dom to 18.2.19 ([#688](https://github.com/openai/openai-node/issues/688)) ([2a0d0b1](https://github.com/openai/openai-node/commit/2a0d0b1cb197eef25e42bbba88ee90c37d623f24)) +* update dependency @types/node to v20.11.20 ([#690](https://github.com/openai/openai-node/issues/690)) ([4ca005b](https://github.com/openai/openai-node/commit/4ca005be082d6c50fe95da6148896b62080bfe07)) +* update dependency @types/ws to v8.5.10 ([#683](https://github.com/openai/openai-node/issues/683)) ([a617268](https://github.com/openai/openai-node/commit/a6172683a3390422984ad282ac4940781493e772)) +* update dependency next to v13.5.6 ([#689](https://github.com/openai/openai-node/issues/689)) ([abb3b66](https://github.com/openai/openai-node/commit/abb3b6674b8f9f8ff9c2cc61629a31883ae4d8c8)) + ## 4.28.3 (2024-02-20) Full Changelog: [v4.28.2...v4.28.3](https://github.com/openai/openai-node/compare/v4.28.2...v4.28.3) diff --git a/README.md b/README.md index c7779e79a..ef174634e 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.28.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.28.4/mod.ts'; ``` diff --git 
a/build-deno b/build-deno index c8215c85d..74d994d08 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.28.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.28.4/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 455c0d180..65d6046f6 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.28.3", + "version": "4.28.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 3975f7a3e..9dd894067 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.28.3'; // x-release-please-version +export const VERSION = '4.28.4'; // x-release-please-version From 08c5974033dfdb3e60ad50305e2a9aafd586d3f2 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 29 Feb 2024 17:39:28 +0100 Subject: [PATCH 010/533] docs(contributing): improve wording (#696) --- CONTRIBUTING.md | 6 +++--- README.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 61f37370f..693e9ea70 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,7 +3,7 @@ This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install/#mac-stable). Other package managers may work but are not officially supported for development. -To setup the repository, run: +To set up the repository, run: ```bash yarn @@ -65,7 +65,7 @@ pnpm link -—global openai ## Running tests -Most tests will require you to [setup a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. 
+Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. ```bash npx prism path/to/your/openapi.yml @@ -99,7 +99,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish NPM` GitHub action](https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml). This will require a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish NPM` GitHub action](https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml). This requires a setup organization or repository secret to be set up. ### Publish manually diff --git a/README.md b/README.md index ef174634e..e8ff603e9 100644 --- a/README.md +++ b/README.md @@ -424,7 +424,7 @@ import OpenAI from 'openai'; ``` To do the inverse, add `import "openai/shims/node"` (which does import polyfills). -This can also be useful if you are getting the wrong TypeScript types for `Response` - more details [here](https://github.com/openai/openai-node/tree/master/src/_shims#readme). +This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/openai/openai-node/tree/master/src/_shims#readme)). 
You may also provide a custom `fetch` function when instantiating the client, which can be used to inspect or alter the `Request` or `Response` before/after each request: From c3fee07c78fef9115da353fab8f5e399f81cdc93 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 29 Feb 2024 21:56:48 +0100 Subject: [PATCH 011/533] docs(readme): fix typo in custom fetch implementation (#698) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e8ff603e9..68d356f8f 100644 --- a/README.md +++ b/README.md @@ -434,7 +434,7 @@ import { fetch } from 'undici'; // as one example import OpenAI from 'openai'; const client = new OpenAI({ - fetch: async (url: RequestInfo, init?: RequestInfo): Promise => { + fetch: async (url: RequestInfo, init?: RequestInit): Promise => { console.log('About to make a request', url, init); const response = await fetch(url, init); console.log('Got response', response); From 181a5dddb650f1b060b88cbe3bf7293ddfecebdf Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:32:50 +0100 Subject: [PATCH 012/533] fix(ChatCompletionStream): abort on async iterator break and handle errors (#699) `break`-ing the async iterator did not previously abort the request which increases usage. Errors are now handled more effectively in the async iterator. 
--- src/lib/ChatCompletionRunFunctions.test.ts | 53 +++++++++++++++++++++- src/lib/ChatCompletionStream.ts | 35 +++++++++++--- 2 files changed, 81 insertions(+), 7 deletions(-) diff --git a/src/lib/ChatCompletionRunFunctions.test.ts b/src/lib/ChatCompletionRunFunctions.test.ts index bb360b217..b524218ae 100644 --- a/src/lib/ChatCompletionRunFunctions.test.ts +++ b/src/lib/ChatCompletionRunFunctions.test.ts @@ -1,5 +1,5 @@ import OpenAI from 'openai'; -import { OpenAIError } from 'openai/error'; +import { OpenAIError, APIConnectionError } from 'openai/error'; import { PassThrough } from 'stream'; import { ParsingToolFunction, @@ -2207,6 +2207,7 @@ describe('resource completions', () => { await listener.sanityCheck(); }); }); + describe('stream', () => { test('successful flow', async () => { const { fetch, handleRequest } = mockStreamingChatCompletionFetch(); @@ -2273,5 +2274,55 @@ describe('resource completions', () => { expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' }); await listener.sanityCheck(); }); + test('handles network errors', async () => { + const { fetch, handleRequest } = mockFetch(); + + const openai = new OpenAI({ apiKey: '...', fetch }); + + const stream = openai.beta.chat.completions.stream( + { + max_tokens: 1024, + model: 'gpt-3.5-turbo', + messages: [{ role: 'user', content: 'Say hello there!' }], + }, + { maxRetries: 0 }, + ); + + handleRequest(async () => { + throw new Error('mock request error'); + }).catch(() => {}); + + async function runStream() { + await stream.done(); + } + + await expect(runStream).rejects.toThrow(APIConnectionError); + }); + test('handles network errors on async iterator', async () => { + const { fetch, handleRequest } = mockFetch(); + + const openai = new OpenAI({ apiKey: '...', fetch }); + + const stream = openai.beta.chat.completions.stream( + { + max_tokens: 1024, + model: 'gpt-3.5-turbo', + messages: [{ role: 'user', content: 'Say hello there!' 
}], + }, + { maxRetries: 0 }, + ); + + handleRequest(async () => { + throw new Error('mock request error'); + }).catch(() => {}); + + async function runStream() { + for await (const _event of stream) { + continue; + } + } + + await expect(runStream).rejects.toThrow(APIConnectionError); + }); }); }); diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index a2aa7032e..2ea040383 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -210,13 +210,16 @@ export class ChatCompletionStream [Symbol.asyncIterator](): AsyncIterator { const pushQueue: ChatCompletionChunk[] = []; - const readQueue: ((chunk: ChatCompletionChunk | undefined) => void)[] = []; + const readQueue: { + resolve: (chunk: ChatCompletionChunk | undefined) => void; + reject: (err: unknown) => void; + }[] = []; let done = false; this.on('chunk', (chunk) => { const reader = readQueue.shift(); if (reader) { - reader(chunk); + reader.resolve(chunk); } else { pushQueue.push(chunk); } @@ -225,7 +228,23 @@ export class ChatCompletionStream this.on('end', () => { done = true; for (const reader of readQueue) { - reader(undefined); + reader.resolve(undefined); + } + readQueue.length = 0; + }); + + this.on('abort', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + this.on('error', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); } readQueue.length = 0; }); @@ -236,13 +255,17 @@ export class ChatCompletionStream if (done) { return { value: undefined, done: true }; } - return new Promise((resolve) => readQueue.push(resolve)).then( - (chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }), - ); + return new Promise((resolve, reject) => + readQueue.push({ resolve, reject }), + ).then((chunk) => (chunk ? 
{ value: chunk, done: false } : { value: undefined, done: true })); } const chunk = pushQueue.shift()!; return { value: chunk, done: false }; }, + return: async () => { + this.abort(); + return { value: undefined, done: true }; + }, }; } From 18d9cb729d23871976368e8b5c40515661a8bd4b Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 1 Mar 2024 14:57:12 +0100 Subject: [PATCH 013/533] chore(docs): mention install from git repo (#700) --- CONTRIBUTING.md | 2 ++ README.md | 1 + 2 files changed, 3 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 693e9ea70..297322d17 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,6 +43,8 @@ To install via git: ```bash npm install --save git+ssh://git@github.com:openai/openai-node.git +# or +yarn add git+ssh://git@github.com:openai/openai-node.git ``` Alternatively, to link a local copy of the repo: diff --git a/README.md b/README.md index 68d356f8f..dd3ac15c0 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo ## Installation ```sh +# install from NPM npm install --save openai # or yarn add openai From c21ef88b650b996dd0cf97f36294db464573b531 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 4 Mar 2024 19:17:09 +0100 Subject: [PATCH 014/533] chore(api): update docs (#703) --- src/resources/audio/speech.ts | 9 +++------ src/resources/audio/transcriptions.ts | 18 ++++++++++++++---- src/resources/audio/translations.ts | 3 ++- src/resources/beta/threads/runs/runs.ts | 4 ++-- src/resources/chat/completions.ts | 14 +++++++++----- src/resources/images.ts | 9 ++++++--- src/resources/moderations.ts | 8 +++----- 7 files changed, 39 insertions(+), 26 deletions(-) diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index d5ef09118..7d0ee2195 100644 --- a/src/resources/audio/speech.ts +++ 
b/src/resources/audio/speech.ts @@ -35,13 +35,10 @@ export interface SpeechCreateParams { voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; /** - * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, - * `flac`, `pcm`, and `wav`. - * - * The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz - * sample rate, mono channel, and 16-bit depth in signed little-endian format. + * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + * `wav`, and `pcm`. */ - response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'pcm' | 'wav'; + response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm'; /** * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 7f381c5a3..ab2079ed6 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -14,7 +14,14 @@ export class Transcriptions extends APIResource { } } +/** + * Represents a transcription response returned by model, based on the provided + * input. + */ export interface Transcription { + /** + * The transcribed text. + */ text: string; } @@ -26,7 +33,8 @@ export interface TranscriptionCreateParams { file: Uploadable; /** - * ID of the model to use. Only `whisper-1` is currently available. + * ID of the model to use. Only `whisper-1` (which is powered by our open source + * Whisper V2 model) is currently available. */ model: (string & {}) | 'whisper-1'; @@ -61,9 +69,11 @@ export interface TranscriptionCreateParams { temperature?: number; /** - * The timestamp granularities to populate for this transcription. Any of these - * options: `word`, or `segment`. Note: There is no additional latency for segment - * timestamps, but generating word timestamps incurs additional latency. + * The timestamp granularities to populate for this transcription. 
+ * `response_format` must be set `verbose_json` to use timestamp granularities. + * Either or both of these options are supported: `word`, or `segment`. Note: There + * is no additional latency for segment timestamps, but generating word timestamps + * incurs additional latency. */ timestamp_granularities?: Array<'word' | 'segment'>; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 54583ce1f..e68a714fb 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -26,7 +26,8 @@ export interface TranslationCreateParams { file: Uploadable; /** - * ID of the model to use. Only `whisper-1` is currently available. + * ID of the model to use. Only `whisper-1` (which is powered by our open source + * Whisper V2 model) is currently available. */ model: (string & {}) | 'whisper-1'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 9582a060b..9a0bc00dd 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -270,9 +270,9 @@ export namespace Run { */ export interface LastError { /** - * One of `server_error` or `rate_limit_exceeded`. + * One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. */ - code: 'server_error' | 'rate_limit_exceeded'; + code: 'server_error' | 'rate_limit_exceeded' | 'invalid_prompt'; /** * A human-readable description of the error. diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 2a5216745..44627eb85 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -546,7 +546,9 @@ export interface ChatCompletionTokenLogprob { bytes: Array | null; /** - * The log probability of this token. + * The log probability of this token, if it is within the top 20 most likely + * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + * unlikely. 
*/ logprob: number; @@ -574,7 +576,9 @@ export namespace ChatCompletionTokenLogprob { bytes: Array | null; /** - * The log probability of this token. + * The log probability of this token, if it is within the top 20 most likely + * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + * unlikely. */ logprob: number; } @@ -827,9 +831,9 @@ export interface ChatCompletionCreateParamsBase { tools?: Array; /** - * An integer between 0 and 5 specifying the number of most likely tokens to return - * at each token position, each with an associated log probability. `logprobs` must - * be set to `true` if this parameter is used. + * An integer between 0 and 20 specifying the number of most likely tokens to + * return at each token position, each with an associated log probability. + * `logprobs` must be set to `true` if this parameter is used. */ top_logprobs?: number | null; diff --git a/src/resources/images.ts b/src/resources/images.ts index 4bc654903..bc5b9edc0 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -80,7 +80,8 @@ export interface ImageCreateVariationParams { /** * The format in which the generated images are returned. Must be one of `url` or - * `b64_json`. + * `b64_json`. URLs are only valid for 60 minutes after the image has been + * generated. */ response_format?: 'url' | 'b64_json' | null; @@ -131,7 +132,8 @@ export interface ImageEditParams { /** * The format in which the generated images are returned. Must be one of `url` or - * `b64_json`. + * `b64_json`. URLs are only valid for 60 minutes after the image has been + * generated. */ response_format?: 'url' | 'b64_json' | null; @@ -176,7 +178,8 @@ export interface ImageGenerateParams { /** * The format in which the generated images are returned. Must be one of `url` or - * `b64_json`. + * `b64_json`. URLs are only valid for 60 minutes after the image has been + * generated. 
*/ response_format?: 'url' | 'b64_json' | null; diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index 8bde6ecca..a43006ccf 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -6,7 +6,7 @@ import * as ModerationsAPI from 'openai/resources/moderations'; export class Moderations extends APIResource { /** - * Classifies if text violates OpenAI's Content Policy + * Classifies if text is potentially harmful. */ create( body: ModerationCreateParams, @@ -28,8 +28,7 @@ export interface Moderation { category_scores: Moderation.CategoryScores; /** - * Whether the content violates - * [OpenAI's usage policies](/policies/usage-policies). + * Whether any of the below categories are flagged. */ flagged: boolean; } @@ -170,8 +169,7 @@ export namespace Moderation { } /** - * Represents policy compliance report by OpenAI's content moderation model against - * a given input. + * Represents if a given text input is potentially harmful. */ export interface ModerationCreateResponse { /** From 34e128fad382d1aeac9912d85c50291c4882d298 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 4 Mar 2024 21:53:09 +0100 Subject: [PATCH 015/533] chore: fix error handler in readme (#704) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index dd3ac15c0..1cfb2537a 100644 --- a/README.md +++ b/README.md @@ -275,7 +275,7 @@ a subclass of `APIError` will be thrown: async function main() { const job = await openai.fineTuning.jobs .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' }) - .catch((err) => { + .catch(async (err) => { if (err instanceof OpenAI.APIError) { console.log(err.status); // 400 console.log(err.name); // BadRequestError From 588b30f6f5604387cb2ade716b6cf693e1175cec Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 5 Mar 2024 12:10:53 +0100 Subject: [PATCH 
016/533] docs(readme): fix https proxy example (#705) --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 1cfb2537a..1207d5d24 100644 --- a/README.md +++ b/README.md @@ -456,7 +456,7 @@ If you would like to disable or customize this behavior, for example to use the ```ts import http from 'http'; -import HttpsProxyAgent from 'https-proxy-agent'; +import { HttpsProxyAgent } from 'https-proxy-agent'; // Configure the default for all requests: const openai = new OpenAI({ @@ -465,9 +465,8 @@ const openai = new OpenAI({ // Override per-request: await openai.models.list({ - baseURL: 'http://localhost:8080/test-api', httpAgent: new http.Agent({ keepAlive: false }), -}) +}); ``` ## Semantic Versioning From 753bced18a57cd4a7739e8e03a7b7933048be79f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 6 Mar 2024 17:42:34 +0100 Subject: [PATCH 017/533] fix(streaming): correctly handle trailing new lines in byte chunks (#708) --- src/streaming.ts | 8 +++++++- tests/streaming.test.ts | 42 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 tests/streaming.test.ts diff --git a/src/streaming.ts b/src/streaming.ts index 7d8b4442a..1b59bce20 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -267,7 +267,7 @@ class SSEDecoder { * * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258 */ -class LineDecoder { +export class LineDecoder { // prettier-ignore static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']); static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g; @@ -300,6 +300,12 @@ class LineDecoder { const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || ''); let lines = text.split(LineDecoder.NEWLINE_REGEXP); + // if there is a trailing new line 
then the last entry will be an empty + // string which we don't care about + if (trailingNewline) { + lines.pop(); + } + if (lines.length === 1 && !trailingNewline) { this.buffer.push(lines[0]!); return []; diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts new file mode 100644 index 000000000..45cf6f6cd --- /dev/null +++ b/tests/streaming.test.ts @@ -0,0 +1,42 @@ +import { LineDecoder } from 'openai/streaming'; + +function decodeChunks(chunks: string[], decoder?: LineDecoder): string[] { + if (!decoder) { + decoder = new LineDecoder(); + } + + const lines = []; + for (const chunk of chunks) { + lines.push(...decoder.decode(chunk)); + } + + return lines; +} + +describe('line decoder', () => { + test('basic', () => { + // baz is not included because the line hasn't ended yet + expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']); + }); + + test('basic with \\r', () => { + // baz is not included because the line hasn't ended yet + expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); + }); + + test('trailing new lines', () => { + expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('trailing new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('escaped new lines', () => { + expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']); + }); + + test('escaped new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); + }); +}); From e0deb2285fb35fac8096ebfe6ed5f9dcd1a8b7f0 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 6 Mar 2024 19:13:10 +0100 Subject: [PATCH 018/533] chore(types): fix accidental exposure of Buffer type to cloudflare (#709) --- src/streaming.ts | 13 ++++++++++++- tests/streaming.test.ts | 15 +-------------- 2 files changed, 13 insertions(+), 15 
deletions(-) diff --git a/src/streaming.ts b/src/streaming.ts index 1b59bce20..7b0466a3c 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -267,7 +267,7 @@ class SSEDecoder { * * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258 */ -export class LineDecoder { +class LineDecoder { // prettier-ignore static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']); static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g; @@ -372,6 +372,17 @@ export class LineDecoder { } } +/** This is an internal helper function that's just used for testing */ +export function _decodeChunks(chunks: string[]): string[] { + const decoder = new LineDecoder(); + const lines = []; + for (const chunk of chunks) { + lines.push(...decoder.decode(chunk)); + } + + return lines; +} + function partition(str: string, delimiter: string): [string, string, string] { const index = str.indexOf(delimiter); if (index !== -1) { diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts index 45cf6f6cd..479b2a341 100644 --- a/tests/streaming.test.ts +++ b/tests/streaming.test.ts @@ -1,17 +1,4 @@ -import { LineDecoder } from 'openai/streaming'; - -function decodeChunks(chunks: string[], decoder?: LineDecoder): string[] { - if (!decoder) { - decoder = new LineDecoder(); - } - - const lines = []; - for (const chunk of chunks) { - lines.push(...decoder.decode(chunk)); - } - - return lines; -} +import { _decodeChunks as decodeChunks } from 'openai/streaming'; describe('line decoder', () => { test('basic', () => { From 0825acf85cd50d02b63a875481aadd5ec6cc6aad Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 6 Mar 2024 21:12:48 +0100 Subject: [PATCH 019/533] docs: remove extraneous --save and yarn install instructions (#710) --- CONTRIBUTING.md | 4 +--- README.md | 5 +---- 2 files changed, 2 insertions(+), 7 deletions(-) diff 
--git a/CONTRIBUTING.md b/CONTRIBUTING.md index 297322d17..d9e64025d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -42,9 +42,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```bash -npm install --save git+ssh://git@github.com:openai/openai-node.git -# or -yarn add git+ssh://git@github.com:openai/openai-node.git +npm install git+ssh://git@github.com:openai/openai-node.git ``` Alternatively, to link a local copy of the repo: diff --git a/README.md b/README.md index 1207d5d24..28262aaca 100644 --- a/README.md +++ b/README.md @@ -11,10 +11,7 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo ## Installation ```sh -# install from NPM -npm install --save openai -# or -yarn add openai +npm install openai ``` You can import in Deno via: From 50206a06974d558d9df7d8649cc2c71822e67472 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 7 Mar 2024 19:13:22 +0100 Subject: [PATCH 020/533] docs: use @deprecated decorator for deprecated params (#711) --- src/resources/chat/completions.ts | 30 ++++++++++++++++++------------ src/resources/files.ts | 8 ++++---- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 44627eb85..c2d6da0be 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -133,8 +133,8 @@ export interface ChatCompletionAssistantMessageParam { content?: string | null; /** - * Deprecated and replaced by `tool_calls`. The name and arguments of a function - * that should be called, as generated by the model. + * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of + * a function that should be called, as generated by the model. 
*/ function_call?: ChatCompletionAssistantMessageParam.FunctionCall; @@ -152,8 +152,8 @@ export interface ChatCompletionAssistantMessageParam { export namespace ChatCompletionAssistantMessageParam { /** - * Deprecated and replaced by `tool_calls`. The name and arguments of a function - * that should be called, as generated by the model. + * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of + * a function that should be called, as generated by the model. */ export interface FunctionCall { /** @@ -250,8 +250,8 @@ export namespace ChatCompletionChunk { content?: string | null; /** - * Deprecated and replaced by `tool_calls`. The name and arguments of a function - * that should be called, as generated by the model. + * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of + * a function that should be called, as generated by the model. */ function_call?: Delta.FunctionCall; @@ -265,8 +265,8 @@ export namespace ChatCompletionChunk { export namespace Delta { /** - * Deprecated and replaced by `tool_calls`. The name and arguments of a function - * that should be called, as generated by the model. + * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of + * a function that should be called, as generated by the model. */ export interface FunctionCall { /** @@ -378,6 +378,9 @@ export interface ChatCompletionFunctionCallOption { name: string; } +/** + * @deprecated + */ export interface ChatCompletionFunctionMessageParam { /** * The contents of the function message. @@ -410,8 +413,8 @@ export interface ChatCompletionMessage { role: 'assistant'; /** - * Deprecated and replaced by `tool_calls`. The name and arguments of a function - * that should be called, as generated by the model. + * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of + * a function that should be called, as generated by the model. 
*/ function_call?: ChatCompletionMessage.FunctionCall; @@ -423,8 +426,8 @@ export interface ChatCompletionMessage { export namespace ChatCompletionMessage { /** - * Deprecated and replaced by `tool_calls`. The name and arguments of a function - * that should be called, as generated by the model. + * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of + * a function that should be called, as generated by the model. */ export interface FunctionCall { /** @@ -855,6 +858,9 @@ export interface ChatCompletionCreateParamsBase { } export namespace ChatCompletionCreateParams { + /** + * @deprecated + */ export interface Function { /** * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain diff --git a/src/resources/files.ts b/src/resources/files.ts index db8f3a66a..cda487a63 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -154,14 +154,14 @@ export interface FileObject { purpose: 'fine-tune' | 'fine-tune-results' | 'assistants' | 'assistants_output'; /** - * Deprecated. The current status of the file, which can be either `uploaded`, - * `processed`, or `error`. + * @deprecated: Deprecated. The current status of the file, which can be either + * `uploaded`, `processed`, or `error`. */ status: 'uploaded' | 'processed' | 'error'; /** - * Deprecated. For details on why a fine-tuning training file failed validation, - * see the `error` field on `fine_tuning.job`. + * @deprecated: Deprecated. For details on why a fine-tuning training file failed + * validation, see the `error` field on `fine_tuning.job`. 
*/ status_details?: string; } From c71ad7062dc778a3675b104650b21877e811956b Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 7 Mar 2024 21:10:51 +0100 Subject: [PATCH 021/533] chore(internal): add explicit type annotation to decoder (#712) --- src/streaming.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/streaming.ts b/src/streaming.ts index 7b0466a3c..f90c5d89a 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -375,7 +375,7 @@ class LineDecoder { /** This is an internal helper function that's just used for testing */ export function _decodeChunks(chunks: string[]): string[] { const decoder = new LineDecoder(); - const lines = []; + const lines: string[] = []; for (const chunk of chunks) { lines.push(...decoder.decode(chunk)); } From beea0c7c6b6b8611f3b95c02fb35e74855f7ba03 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 13 Mar 2024 01:06:20 -0400 Subject: [PATCH 022/533] release: 4.28.5 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 27 +++++++++++++++++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 32 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5934251e9..2813cb972 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.28.4" + ".": "4.28.5" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 68ebe3767..8798e4b66 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## 4.28.5 (2024-03-13) + +Full Changelog: [v4.28.4...v4.28.5](https://github.com/openai/openai-node/compare/v4.28.4...v4.28.5) + +### Bug Fixes + +* **ChatCompletionStream:** abort on async iterator break and handle errors ([#699](https://github.com/openai/openai-node/issues/699)) 
([ac417a2](https://github.com/openai/openai-node/commit/ac417a2db31919d2b52f2eb2e38f9c67a8f73254)) +* **streaming:** correctly handle trailing new lines in byte chunks ([#708](https://github.com/openai/openai-node/issues/708)) ([4753be2](https://github.com/openai/openai-node/commit/4753be272b1d1dade7a769cf350b829fc639f36e)) + + +### Chores + +* **api:** update docs ([#703](https://github.com/openai/openai-node/issues/703)) ([e1db98b](https://github.com/openai/openai-node/commit/e1db98bef29d200e2e401e3f5d7b2db6839c7836)) +* **docs:** mention install from git repo ([#700](https://github.com/openai/openai-node/issues/700)) ([c081bdb](https://github.com/openai/openai-node/commit/c081bdbb55585e63370496d324dc6f94d86424d1)) +* fix error handler in readme ([#704](https://github.com/openai/openai-node/issues/704)) ([4ff790a](https://github.com/openai/openai-node/commit/4ff790a67cf876191e04ad0e369e447e080b78a7)) +* **internal:** add explicit type annotation to decoder ([#712](https://github.com/openai/openai-node/issues/712)) ([d728e99](https://github.com/openai/openai-node/commit/d728e9923554e4c72c9efa3bd528561400d50ad8)) +* **types:** fix accidental exposure of Buffer type to cloudflare ([#709](https://github.com/openai/openai-node/issues/709)) ([0323ecb](https://github.com/openai/openai-node/commit/0323ecb98ddbd8910fc5719c8bab5175b945d2ab)) + + +### Documentation + +* **contributing:** improve wording ([#696](https://github.com/openai/openai-node/issues/696)) ([940d569](https://github.com/openai/openai-node/commit/940d5695f4cacddbb58e3bfc50fec28c468c7e63)) +* **readme:** fix https proxy example ([#705](https://github.com/openai/openai-node/issues/705)) ([d144789](https://github.com/openai/openai-node/commit/d1447890a556d37928b628f6449bb80de224d207)) +* **readme:** fix typo in custom fetch implementation ([#698](https://github.com/openai/openai-node/issues/698)) ([64041fd](https://github.com/openai/openai-node/commit/64041fd33da569eccae64afe4e50ee803017b20b)) +* remove 
extraneous --save and yarn install instructions ([#710](https://github.com/openai/openai-node/issues/710)) ([8ec216d](https://github.com/openai/openai-node/commit/8ec216d6b72ee4d67e26786f06c93af18d042117)) +* use [@deprecated](https://github.com/deprecated) decorator for deprecated params ([#711](https://github.com/openai/openai-node/issues/711)) ([4688ef4](https://github.com/openai/openai-node/commit/4688ef4b36e9f383a3abf6cdb31d498163a7bb9e)) + ## 4.28.4 (2024-02-28) Full Changelog: [v4.28.3...v4.28.4](https://github.com/openai/openai-node/compare/v4.28.3...v4.28.4) diff --git a/README.md b/README.md index 28262aaca..24d38ac79 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.28.4/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.28.5/mod.ts'; ``` diff --git a/build-deno b/build-deno index 74d994d08..fb739cc50 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.28.4/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.28.5/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 65d6046f6..d51c4ca96 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.28.4", + "version": "4.28.5", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 9dd894067..516e764d1 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.28.4'; // x-release-please-version +export const VERSION = '4.28.5'; // x-release-please-version From 7d27d286876d0a575d91a4752f401126fe93d2a3 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: 
Wed, 13 Mar 2024 16:30:47 -0400 Subject: [PATCH 023/533] feat(assistants): add support for streaming (#714) See the reference docs for more information: https://platform.openai.com/docs/api-reference/assistants-streaming We've also improved some of the names for the types in the assistants beta, non exhaustive list: - `CodeToolCall` -> `CodeInterpreterToolCall` - `MessageContentImageFile` -> `ImageFileContentBlock` - `MessageContentText` -> `TextContentBlock` - `ThreadMessage` -> `Message` - `ThreadMessageDeleted` -> `MessageDeleted` --- api.md | 58 +- examples/assistant-stream-raw.ts | 39 + examples/assistant-stream.ts | 48 + examples/assistants.ts | 57 ++ src/index.ts | 1 + src/lib/AbstractAssistantStreamRunner.ts | 340 +++++++ src/lib/AssistantStream.ts | 698 +++++++++++++++ src/resources/beta/assistants/assistants.ts | 844 ++++++++++++++++-- src/resources/beta/assistants/index.ts | 9 + src/resources/beta/beta.ts | 12 + src/resources/beta/index.ts | 12 + src/resources/beta/threads/index.ts | 35 +- src/resources/beta/threads/messages/index.ts | 26 +- .../beta/threads/messages/messages.ts | 426 +++++++-- src/resources/beta/threads/runs/index.ts | 19 +- src/resources/beta/threads/runs/runs.ts | 281 ++++-- src/resources/beta/threads/runs/steps.ts | 259 +++++- src/resources/beta/threads/threads.ts | 207 ++++- src/resources/chat/completions.ts | 2 +- src/resources/completions.ts | 2 + src/resources/shared.ts | 10 + src/streaming.ts | 14 + .../beta/threads/runs/runs.test.ts | 2 + .../beta/threads/threads.test.ts | 1 + tests/streaming/assistants/assistant.test.ts | 32 + 25 files changed, 3155 insertions(+), 279 deletions(-) create mode 100644 examples/assistant-stream-raw.ts create mode 100644 examples/assistant-stream.ts create mode 100644 examples/assistants.ts create mode 100644 src/lib/AbstractAssistantStreamRunner.ts create mode 100644 src/lib/AssistantStream.ts create mode 100644 tests/streaming/assistants/assistant.test.ts diff --git a/api.md b/api.md index 
ff3180cba..504a103c7 100644 --- a/api.md +++ b/api.md @@ -2,6 +2,7 @@ Types: +- ErrorObject - FunctionDefinition - FunctionParameters @@ -177,6 +178,15 @@ Types: - Assistant - AssistantDeleted +- AssistantStreamEvent +- AssistantTool +- CodeInterpreterTool +- FunctionTool +- MessageStreamEvent +- RetrievalTool +- RunStepStreamEvent +- RunStreamEvent +- ThreadStreamEvent Methods: @@ -214,6 +224,7 @@ Methods: - client.beta.threads.update(threadId, { ...params }) -> Thread - client.beta.threads.del(threadId) -> ThreadDeleted - client.beta.threads.createAndRun({ ...params }) -> Run +- client.beta.threads.createAndRunStream(body, options?) -> AssistantStream ### Runs @@ -231,16 +242,29 @@ Methods: - client.beta.threads.runs.list(threadId, { ...params }) -> RunsPage - client.beta.threads.runs.cancel(threadId, runId) -> Run - client.beta.threads.runs.submitToolOutputs(threadId, runId, { ...params }) -> Run +- client.beta.threads.runs.createAndStream(threadId, body, options?) -> AssistantStream +- client.beta.threads.runs.submitToolOutputsStream(threadId, runId, body, options?) 
-> AssistantStream #### Steps Types: -- CodeToolCall +- CodeInterpreterLogs +- CodeInterpreterOutputImage +- CodeInterpreterToolCall +- CodeInterpreterToolCallDelta - FunctionToolCall +- FunctionToolCallDelta - MessageCreationStepDetails - RetrievalToolCall +- RetrievalToolCallDelta - RunStep +- RunStepDelta +- RunStepDeltaEvent +- RunStepDeltaMessageDelta +- ToolCall +- ToolCallDelta +- ToolCallDeltaObject - ToolCallsStepDetails Methods: @@ -252,17 +276,33 @@ Methods: Types: -- MessageContentImageFile -- MessageContentText -- ThreadMessage -- ThreadMessageDeleted +- Annotation +- AnnotationDelta +- FileCitationAnnotation +- FileCitationDeltaAnnotation +- FilePathAnnotation +- FilePathDeltaAnnotation +- ImageFile +- ImageFileContentBlock +- ImageFileDelta +- ImageFileDeltaBlock +- Message +- MessageContent +- MessageContentDelta +- MessageDeleted +- MessageDelta +- MessageDeltaEvent +- Text +- TextContentBlock +- TextDelta +- TextDeltaBlock Methods: -- client.beta.threads.messages.create(threadId, { ...params }) -> ThreadMessage -- client.beta.threads.messages.retrieve(threadId, messageId) -> ThreadMessage -- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> ThreadMessage -- client.beta.threads.messages.list(threadId, { ...params }) -> ThreadMessagesPage +- client.beta.threads.messages.create(threadId, { ...params }) -> Message +- client.beta.threads.messages.retrieve(threadId, messageId) -> Message +- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message +- client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage #### Files diff --git a/examples/assistant-stream-raw.ts b/examples/assistant-stream-raw.ts new file mode 100644 index 000000000..a882d219a --- /dev/null +++ b/examples/assistant-stream-raw.ts @@ -0,0 +1,39 @@ +import OpenAI from 'openai'; + +const openai = new OpenAI(); + +async function main() { + const assistant = await openai.beta.assistants.create({ + model: 'gpt-4-1106-preview', 
+ name: 'Math Tutor', + instructions: 'You are a personal math tutor. Write and run code to answer math questions.', + }); + + const thread = await openai.beta.threads.create({ + messages: [ + { + role: 'user', + content: '"I need to solve the equation `3x + 11 = 14`. Can you help me?"', + }, + ], + }); + + const stream = await openai.beta.threads.runs.create(thread.id, { + assistant_id: assistant.id, + additional_instructions: 'Please address the user as Jane Doe. The user has a premium account.', + stream: true, + }); + + for await (const event of stream) { + if (event.event === 'thread.message.delta') { + const chunk = event.data.delta.content?.[0]; + if (chunk && 'text' in chunk) { + process.stdout.write(chunk.text.value); + } + } + } + + console.log(); +} + +main(); diff --git a/examples/assistant-stream.ts b/examples/assistant-stream.ts new file mode 100644 index 000000000..36c4ed152 --- /dev/null +++ b/examples/assistant-stream.ts @@ -0,0 +1,48 @@ +#!/usr/bin/env -S npm run tsn -T + +import OpenAI from 'openai'; + +/** + * Example of streaming a response from an assistant + */ + +const openai = new OpenAI(); + +async function main() { + const assistant = await openai.beta.assistants.create({ + model: 'gpt-4-1106-preview', + name: 'Math Tutor', + instructions: 'You are a personal math tutor. Write and run code to answer math questions.', + }); + + let assistantId = assistant.id; + console.log('Created Assistant with Id: ' + assistantId); + + const thread = await openai.beta.threads.create({ + messages: [ + { + role: 'user', + content: '"I need to solve the equation `3x + 11 = 14`. 
Can you help me?"', + }, + ], + }); + + let threadId = thread.id; + console.log('Created thread with Id: ' + threadId); + + const run = openai.beta.threads.runs + .createAndStream(threadId, { + assistant_id: assistantId, + }) + //Subscribe to streaming events and log them + .on('event', (event) => console.log(event)) + .on('textDelta', (delta, snapshot) => console.log(snapshot)) + .on('messageDelta', (delta, snapshot) => console.log(snapshot)) + .on('run', (run) => console.log(run)) + .on('messageDelta', (delta, snapshot) => console.log(snapshot)) + .on('connect', () => console.log()); + const result = await run.finalRun(); + console.log('Run Result' + result); +} + +main(); diff --git a/examples/assistants.ts b/examples/assistants.ts new file mode 100644 index 000000000..bbc2f80ce --- /dev/null +++ b/examples/assistants.ts @@ -0,0 +1,57 @@ +#!/usr/bin/env -S npm run tsn -T + +import OpenAI from 'openai'; +import { sleep } from 'openai/core'; + +/** + * Example of polling for a complete response from an assistant + */ + +const openai = new OpenAI(); + +async function main() { + const assistant = await openai.beta.assistants.create({ + model: 'gpt-4-1106-preview', + name: 'Math Tutor', + instructions: 'You are a personal math tutor. Write and run code to answer math questions.', + // tools = [], + }); + + let assistantId = assistant.id; + console.log('Created Assistant with Id: ' + assistantId); + + const thread = await openai.beta.threads.create({ + messages: [ + { + role: 'user', + content: '"I need to solve the equation `3x + 11 = 14`. Can you help me?"', + }, + ], + }); + + let threadId = thread.id; + console.log('Created thread with Id: ' + threadId); + + const run = await openai.beta.threads.runs.create(thread.id, { + assistant_id: assistantId, + additional_instructions: 'Please address the user as Jane Doe. 
The user has a premium account.', + }); + + console.log('Created run with Id: ' + run.id); + + while (true) { + const result = await openai.beta.threads.runs.retrieve(thread.id, run.id); + if (result.status == 'completed') { + const messages = await openai.beta.threads.messages.list(thread.id); + for (const message of messages.getPaginatedItems()) { + console.log(message); + } + break; + } else { + console.log('Waiting for completion. Current status: ' + result.status); + await sleep(5000); + } + } +} + +main(); diff --git a/src/index.ts b/src/index.ts index 80bf95b0d..7b3033fa9 100644 --- a/src/index.ts +++ b/src/index.ts @@ -285,6 +285,7 @@ export namespace OpenAI { export import Beta = API.Beta; + export import ErrorObject = API.ErrorObject; export import FunctionDefinition = API.FunctionDefinition; export import FunctionParameters = API.FunctionParameters; } diff --git a/src/lib/AbstractAssistantStreamRunner.ts b/src/lib/AbstractAssistantStreamRunner.ts new file mode 100644 index 000000000..b600f0df3 --- /dev/null +++ b/src/lib/AbstractAssistantStreamRunner.ts @@ -0,0 +1,340 @@ +import * as Core from 'openai/core'; +import { APIUserAbortError, OpenAIError } from 'openai/error'; +import { Run, RunSubmitToolOutputsParamsBase } from 'openai/resources/beta/threads/runs/runs'; +import { RunCreateParamsBase, Runs } from 'openai/resources/beta/threads/runs/runs'; +import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; + +export abstract class AbstractAssistantStreamRunner< + Events extends CustomEvents = AbstractAssistantRunnerEvents, +> { + controller: AbortController = new AbortController(); + + #connectedPromise: Promise; + #resolveConnectedPromise: () => void = () => {}; + #rejectConnectedPromise: (error: OpenAIError) => void = () => {}; + + #endPromise: Promise; + #resolveEndPromise: () => void = () => {}; + #rejectEndPromise: (error: OpenAIError) => void = () => {}; + + #listeners: { [Event in keyof Events]?: 
ListenersForEvent } = {}; + + #ended = false; + #errored = false; + #aborted = false; + #catchingPromiseCreated = false; + + constructor() { + this.#connectedPromise = new Promise((resolve, reject) => { + this.#resolveConnectedPromise = resolve; + this.#rejectConnectedPromise = reject; + }); + + this.#endPromise = new Promise((resolve, reject) => { + this.#resolveEndPromise = resolve; + this.#rejectEndPromise = reject; + }); + + // Don't let these promises cause unhandled rejection errors. + // we will manually cause an unhandled rejection error later + // if the user hasn't registered any error listener or called + // any promise-returning method. + this.#connectedPromise.catch(() => {}); + this.#endPromise.catch(() => {}); + } + + protected _run(executor: () => Promise) { + // Unfortunately if we call `executor()` immediately we get runtime errors about + // references to `this` before the `super()` constructor call returns. + setTimeout(() => { + executor().then(() => { + // this._emitFinal(); + this._emit('end'); + }, this.#handleError); + }, 0); + } + + protected _addRun(run: Run): Run { + return run; + } + + protected _connected() { + if (this.ended) return; + this.#resolveConnectedPromise(); + this._emit('connect'); + } + + get ended(): boolean { + return this.#ended; + } + + get errored(): boolean { + return this.#errored; + } + + get aborted(): boolean { + return this.#aborted; + } + + abort() { + this.controller.abort(); + } + + /** + * Adds the listener function to the end of the listeners array for the event. + * No checks are made to see if the listener has already been added. Multiple calls passing + * the same combination of event and listener will result in the listener being added, and + * called, multiple times. 
+ * @returns this ChatCompletionStream, so that calls can be chained + */ + on(event: Event, listener: ListenerForEvent): this { + const listeners: ListenersForEvent = + this.#listeners[event] || (this.#listeners[event] = []); + listeners.push({ listener }); + return this; + } + + /** + * Removes the specified listener from the listener array for the event. + * off() will remove, at most, one instance of a listener from the listener array. If any single + * listener has been added multiple times to the listener array for the specified event, then + * off() must be called multiple times to remove each instance. + * @returns this ChatCompletionStream, so that calls can be chained + */ + off(event: Event, listener: ListenerForEvent): this { + const listeners = this.#listeners[event]; + if (!listeners) return this; + const index = listeners.findIndex((l) => l.listener === listener); + if (index >= 0) listeners.splice(index, 1); + return this; + } + + /** + * Adds a one-time listener function for the event. The next time the event is triggered, + * this listener is removed and then invoked. + * @returns this ChatCompletionStream, so that calls can be chained + */ + once(event: Event, listener: ListenerForEvent): this { + const listeners: ListenersForEvent = + this.#listeners[event] || (this.#listeners[event] = []); + listeners.push({ listener, once: true }); + return this; + } + + /** + * This is similar to `.once()`, but returns a Promise that resolves the next time + * the event is triggered, instead of calling a listener callback. + * @returns a Promise that resolves the next time given event is triggered, + * or rejects if an error is emitted. (If you request the 'error' event, + * returns a promise that resolves with the error). + * + * Example: + * + * const message = await stream.emitted('message') // rejects if the stream errors + */ + emitted( + event: Event, + ): Promise< + EventParameters extends [infer Param] ? Param + : EventParameters extends [] ? 
void + : EventParameters + > { + return new Promise((resolve, reject) => { + this.#catchingPromiseCreated = true; + if (event !== 'error') this.once('error', reject); + this.once(event, resolve as any); + }); + } + + async done(): Promise { + this.#catchingPromiseCreated = true; + await this.#endPromise; + } + + #handleError = (error: unknown) => { + this.#errored = true; + if (error instanceof Error && error.name === 'AbortError') { + error = new APIUserAbortError(); + } + if (error instanceof APIUserAbortError) { + this.#aborted = true; + return this._emit('abort', error); + } + if (error instanceof OpenAIError) { + return this._emit('error', error); + } + if (error instanceof Error) { + const openAIError: OpenAIError = new OpenAIError(error.message); + // @ts-ignore + openAIError.cause = error; + return this._emit('error', openAIError); + } + return this._emit('error', new OpenAIError(String(error))); + }; + + protected _emit(event: Event, ...args: EventParameters) { + // make sure we don't emit any events after end + if (this.#ended) { + return; + } + + if (event === 'end') { + this.#ended = true; + this.#resolveEndPromise(); + } + + const listeners: ListenersForEvent | undefined = this.#listeners[event]; + if (listeners) { + this.#listeners[event] = listeners.filter((l) => !l.once) as any; + listeners.forEach(({ listener }: any) => listener(...args)); + } + + if (event === 'abort') { + const error = args[0] as APIUserAbortError; + if (!this.#catchingPromiseCreated && !listeners?.length) { + Promise.reject(error); + } + this.#rejectConnectedPromise(error); + this.#rejectEndPromise(error); + this._emit('end'); + return; + } + + if (event === 'error') { + // NOTE: _emit('error', error) should only be called from #handleError(). + + const error = args[0] as OpenAIError; + if (!this.#catchingPromiseCreated && !listeners?.length) { + // Trigger an unhandled rejection if the user hasn't registered any error handlers. 
+ // If you are seeing stack traces here, make sure to handle errors via either: + // - runner.on('error', () => ...) + // - await runner.done() + // - await runner.finalChatCompletion() + // - etc. + Promise.reject(error); + } + this.#rejectConnectedPromise(error); + this.#rejectEndPromise(error); + this._emit('end'); + } + } + + protected async _threadAssistantStream( + body: ThreadCreateAndRunParamsBase, + thread: Threads, + options?: Core.RequestOptions, + ): Promise { + return await this._createThreadAssistantStream(thread, body, options); + } + + protected async _runAssistantStream( + threadId: string, + runs: Runs, + params: RunCreateParamsBase, + options?: Core.RequestOptions, + ): Promise { + return await this._createAssistantStream(runs, threadId, params, options); + } + + protected async _runToolAssistantStream( + threadId: string, + runId: string, + runs: Runs, + params: RunSubmitToolOutputsParamsBase, + options?: Core.RequestOptions, + ): Promise { + return await this._createToolAssistantStream(runs, threadId, runId, params, options); + } + + protected async _createThreadAssistantStream( + thread: Threads, + body: ThreadCreateAndRunParamsBase, + options?: Core.RequestOptions, + ): Promise { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + // this.#validateParams(params); + + const runResult = await thread.createAndRun( + { ...body, stream: false }, + { ...options, signal: this.controller.signal }, + ); + this._connected(); + return this._addRun(runResult as Run); + } + + protected async _createToolAssistantStream( + run: Runs, + threadId: string, + runId: string, + params: RunSubmitToolOutputsParamsBase, + options?: Core.RequestOptions, + ): Promise { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + + const 
runResult = await run.submitToolOutputs( + threadId, + runId, + { ...params, stream: false }, + { ...options, signal: this.controller.signal }, + ); + this._connected(); + return this._addRun(runResult as Run); + } + + protected async _createAssistantStream( + run: Runs, + threadId: string, + params: RunCreateParamsBase, + options?: Core.RequestOptions, + ): Promise { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + // this.#validateParams(params); + + const runResult = await run.create( + threadId, + { ...params, stream: false }, + { ...options, signal: this.controller.signal }, + ); + this._connected(); + return this._addRun(runResult as Run); + } +} + +type CustomEvents = { + [k in Event]: k extends keyof AbstractAssistantRunnerEvents ? AbstractAssistantRunnerEvents[k] + : (...args: any[]) => void; +}; + +type ListenerForEvent, Event extends keyof Events> = Event extends ( + keyof AbstractAssistantRunnerEvents +) ? 
+ AbstractAssistantRunnerEvents[Event] +: Events[Event]; + +type ListenersForEvent, Event extends keyof Events> = Array<{ + listener: ListenerForEvent; + once?: boolean; +}>; +type EventParameters, Event extends keyof Events> = Parameters< + ListenerForEvent +>; + +export interface AbstractAssistantRunnerEvents { + connect: () => void; + run: (run: Run) => void; + error: (error: OpenAIError) => void; + abort: (error: APIUserAbortError) => void; + end: () => void; +} diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts new file mode 100644 index 000000000..d70cb7358 --- /dev/null +++ b/src/lib/AssistantStream.ts @@ -0,0 +1,698 @@ +import { + TextContentBlock, + ImageFileContentBlock, + Message, + MessageContentDelta, + Text, + ImageFile, + TextDelta, + Messages, +} from 'openai/resources/beta/threads/messages/messages'; +import * as Core from 'openai/core'; +import { RequestOptions } from 'openai/core'; +import { + Run, + RunCreateParamsBase, + RunCreateParamsStreaming, + Runs, + RunSubmitToolOutputsParamsBase, + RunSubmitToolOutputsParamsStreaming, +} from 'openai/resources/beta/threads/runs/runs'; +import { + AbstractAssistantRunnerEvents, + AbstractAssistantStreamRunner, +} from './AbstractAssistantStreamRunner'; +import { type ReadableStream } from 'openai/_shims/index'; +import { Stream } from 'openai/streaming'; +import { APIUserAbortError, OpenAIError } from 'openai/error'; +import { + AssistantStreamEvent, + MessageStreamEvent, + RunStepStreamEvent, + RunStreamEvent, +} from 'openai/resources/beta/assistants/assistants'; +import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps'; +import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; +import MessageDelta = Messages.MessageDelta; + +export interface AssistantStreamEvents extends AbstractAssistantRunnerEvents { + //New event structure + messageCreated: (message: Message) => void; + messageDelta: (message: 
MessageDelta, snapshot: Message) => void; + messageDone: (message: Message) => void; + + runStepCreated: (runStep: RunStep) => void; + runStepDelta: (delta: RunStepDelta, snapshot: Runs.RunStep) => void; + runStepDone: (runStep: Runs.RunStep, snapshot: Runs.RunStep) => void; + + toolCallCreated: (toolCall: ToolCall) => void; + toolCallDelta: (delta: ToolCallDelta, snapshot: ToolCall) => void; + toolCallDone: (toolCall: ToolCall) => void; + + textCreated: (content: Text) => void; + textDelta: (delta: TextDelta, snapshot: Text) => void; + textDone: (content: Text, snapshot: Message) => void; + + //No created or delta as this is not streamed + imageFileDone: (content: ImageFile, snapshot: Message) => void; + + end: () => void; + + event: (event: AssistantStreamEvent) => void; +} + +export type ThreadCreateAndRunParamsBaseStream = Omit & { + stream?: true; +}; + +export type RunCreateParamsBaseStream = Omit & { + stream?: true; +}; + +export type RunSubmitToolOutputsParamsStream = Omit & { + stream?: true; +}; + +export class AssistantStream + extends AbstractAssistantStreamRunner + implements AsyncIterable +{ + //Track all events in a single list for reference + #events: AssistantStreamEvent[] = []; + + //Used to accumulate deltas + //We are accumulating many types so the value here is not strict + #runStepSnapshots: { [id: string]: Runs.RunStep } = {}; + #messageSnapshots: { [id: string]: Message } = {}; + #messageSnapshot: Message | undefined; + #finalRun: Run | undefined; + #currentContentIndex: number | undefined; + #currentContent: TextContentBlock | ImageFileContentBlock | undefined; + #currentToolCallIndex: number | undefined; + #currentToolCall: ToolCall | undefined; + + //For current snapshot methods + #currentEvent: AssistantStreamEvent | undefined; + #currentRunSnapshot: Run | undefined; + #currentRunStepSnapshot: Runs.RunStep | undefined; + + [Symbol.asyncIterator](): AsyncIterator { + const pushQueue: AssistantStreamEvent[] = []; + const readQueue: { + 
resolve: (chunk: AssistantStreamEvent | undefined) => void; + reject: (err: unknown) => void; + }[] = []; + let done = false; + + //Catch all for passing along all events + this.on('event', (event) => { + const reader = readQueue.shift(); + if (reader) { + reader.resolve(event); + } else { + pushQueue.push(event); + } + }); + + this.on('end', () => { + done = true; + for (const reader of readQueue) { + reader.resolve(undefined); + } + readQueue.length = 0; + }); + + this.on('abort', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + this.on('error', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + return { + next: async (): Promise> => { + if (!pushQueue.length) { + if (done) { + return { value: undefined, done: true }; + } + return new Promise((resolve, reject) => + readQueue.push({ resolve, reject }), + ).then((chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true })); + } + const chunk = pushQueue.shift()!; + return { value: chunk, done: false }; + }, + return: async () => { + this.abort(); + return { value: undefined, done: true }; + }, + }; + } + + toReadableStream(): ReadableStream { + const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller); + return stream.toReadableStream(); + } + + static createToolAssistantStream( + threadId: string, + runId: string, + runs: Runs, + body: RunSubmitToolOutputsParamsStream, + options: RequestOptions | undefined, + ) { + const runner = new AssistantStream(); + runner._run(() => + runner._runToolAssistantStream(threadId, runId, runs, body, { + ...options, + headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, + }), + ); + return runner; + } + + protected override async _createToolAssistantStream( + run: Runs, + threadId: string, + runId: string, + params: RunSubmitToolOutputsParamsStream, + options?: 
Core.RequestOptions, + ): Promise { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + + const body: RunSubmitToolOutputsParamsStreaming = { ...params, stream: true }; + const stream = await run.submitToolOutputs(threadId, runId, body, { + ...options, + signal: this.controller.signal, + }); + + this._connected(); + + for await (const event of stream) { + this.#addEvent(event); + } + if (stream.controller.signal?.aborted) { + throw new APIUserAbortError(); + } + + return this._addRun(this.#endRequest()); + } + + static createThreadAssistantStream( + body: ThreadCreateAndRunParamsBaseStream, + thread: Threads, + options?: RequestOptions, + ) { + const runner = new AssistantStream(); + runner._run(() => + runner._threadAssistantStream(body, thread, { + ...options, + headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, + }), + ); + return runner; + } + + static createAssistantStream( + threadId: string, + runs: Runs, + params: RunCreateParamsBaseStream, + options?: RequestOptions, + ) { + const runner = new AssistantStream(); + runner._run(() => + runner._runAssistantStream(threadId, runs, params, { + ...options, + headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, + }), + ); + return runner; + } + + currentEvent(): AssistantStreamEvent | undefined { + return this.#currentEvent; + } + + currentRun(): Run | undefined { + return this.#currentRunSnapshot; + } + + currentMessageSnapshot(): Message | undefined { + return this.#messageSnapshot; + } + + currentRunStepSnapshot(): Runs.RunStep | undefined { + return this.#currentRunStepSnapshot; + } + + async finalRunSteps(): Promise { + await this.done(); + + return Object.values(this.#runStepSnapshots); + } + + async finalMessages(): Promise { + await this.done(); + + return Object.values(this.#messageSnapshots); + } + + async finalRun(): Promise { + await 
this.done(); + if (!this.#finalRun) throw Error('Final run was not received.'); + + return this.#finalRun; + } + + protected override async _createThreadAssistantStream( + thread: Threads, + params: ThreadCreateAndRunParamsBase, + options?: Core.RequestOptions, + ): Promise { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + + const body: RunCreateParamsStreaming = { ...params, stream: true }; + const stream = await thread.createAndRun(body, { ...options, signal: this.controller.signal }); + + this._connected(); + + for await (const event of stream) { + this.#addEvent(event); + } + if (stream.controller.signal?.aborted) { + throw new APIUserAbortError(); + } + + return this._addRun(this.#endRequest()); + } + + protected override async _createAssistantStream( + run: Runs, + threadId: string, + params: RunCreateParamsBase, + options?: Core.RequestOptions, + ): Promise { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + + const body: RunCreateParamsStreaming = { ...params, stream: true }; + const stream = await run.create(threadId, body, { ...options, signal: this.controller.signal }); + + this._connected(); + + for await (const event of stream) { + this.#addEvent(event); + } + if (stream.controller.signal?.aborted) { + throw new APIUserAbortError(); + } + + return this._addRun(this.#endRequest()); + } + + #addEvent(event: AssistantStreamEvent) { + if (this.ended) return; + + this.#currentEvent = event; + + this.#handleEvent(event); + + switch (event.event) { + case 'thread.created': + //No action on this event. 
+ break; + + case 'thread.run.created': + case 'thread.run.queued': + case 'thread.run.in_progress': + case 'thread.run.requires_action': + case 'thread.run.completed': + case 'thread.run.failed': + case 'thread.run.cancelling': + case 'thread.run.cancelled': + case 'thread.run.expired': + this.#handleRun(event); + break; + + case 'thread.run.step.created': + case 'thread.run.step.in_progress': + case 'thread.run.step.delta': + case 'thread.run.step.completed': + case 'thread.run.step.failed': + case 'thread.run.step.cancelled': + case 'thread.run.step.expired': + this.#handleRunStep(event); + break; + + case 'thread.message.created': + case 'thread.message.in_progress': + case 'thread.message.delta': + case 'thread.message.completed': + case 'thread.message.incomplete': + this.#handleMessage(event); + break; + + case 'error': + //This is included for completeness, but errors are processed in the SSE event processing so this should not occur + throw new Error( + 'Encountered an error event in event processing - errors should be processed earlier', + ); + } + } + + #endRequest(): Run { + if (this.ended) { + throw new OpenAIError(`stream has ended, this shouldn't happen`); + } + + if (!this.#finalRun) throw Error('Final run has been been received'); + + return this.#finalRun; + } + + #handleMessage(event: MessageStreamEvent) { + const [accumulatedMessage, newContent] = this.#accumulateMessage(event, this.#messageSnapshot); + this.#messageSnapshot = accumulatedMessage; + this.#messageSnapshots[accumulatedMessage.id] = accumulatedMessage; + + for (const content of newContent) { + const snapshotContent = accumulatedMessage.content[content.index]; + if (snapshotContent?.type == 'text') { + this._emit('textCreated', snapshotContent.text); + } + } + + switch (event.event) { + case 'thread.message.created': + this._emit('messageCreated', event.data); + break; + + case 'thread.message.in_progress': + break; + + case 'thread.message.delta': + this._emit('messageDelta', 
event.data.delta, accumulatedMessage); + + if (event.data.delta.content) { + for (const content of event.data.delta.content) { + //If it is text delta, emit a text delta event + if (content.type == 'text' && content.text) { + let textDelta = content.text; + let snapshot = accumulatedMessage.content[content.index]; + if (snapshot && snapshot.type == 'text') { + this._emit('textDelta', textDelta, snapshot.text); + } else { + throw Error('The snapshot associated with this text delta is not text or missing'); + } + } + + if (content.index != this.#currentContentIndex) { + //See if we have in progress content + if (this.#currentContent) { + switch (this.#currentContent.type) { + case 'text': + this._emit('textDone', this.#currentContent.text, this.#messageSnapshot); + break; + case 'image_file': + this._emit('imageFileDone', this.#currentContent.image_file, this.#messageSnapshot); + break; + } + } + + this.#currentContentIndex = content.index; + } + + this.#currentContent = accumulatedMessage.content[content.index]; + } + } + + break; + + case 'thread.message.completed': + case 'thread.message.incomplete': + //We emit the latest content we were working on on completion (including incomplete) + if (this.#currentContentIndex !== undefined) { + const currentContent = event.data.content[this.#currentContentIndex]; + if (currentContent) { + switch (currentContent.type) { + case 'image_file': + this._emit('imageFileDone', currentContent.image_file, this.#messageSnapshot); + break; + case 'text': + this._emit('textDone', currentContent.text, this.#messageSnapshot); + break; + } + } + } + + if (this.#messageSnapshot) { + this._emit('messageDone', event.data); + } + + this.#messageSnapshot = undefined; + } + } + + #handleRunStep(event: RunStepStreamEvent) { + const accumulatedRunStep = this.#accumulateRunStep(event); + this.#currentRunStepSnapshot = accumulatedRunStep; + + switch (event.event) { + case 'thread.run.step.created': + this._emit('runStepCreated', event.data); + 
break; + case 'thread.run.step.delta': + const delta = event.data.delta; + if ( + delta.step_details && + delta.step_details.type == 'tool_calls' && + delta.step_details.tool_calls && + accumulatedRunStep.step_details.type == 'tool_calls' + ) { + for (const toolCall of delta.step_details.tool_calls) { + if (toolCall.index == this.#currentToolCallIndex) { + this._emit( + 'toolCallDelta', + toolCall, + accumulatedRunStep.step_details.tool_calls[toolCall.index] as ToolCall, + ); + } else { + if (this.#currentToolCall) { + this._emit('toolCallDone', this.#currentToolCall); + } + + this.#currentToolCallIndex = toolCall.index; + this.#currentToolCall = accumulatedRunStep.step_details.tool_calls[toolCall.index]; + if (this.#currentToolCall) this._emit('toolCallCreated', this.#currentToolCall); + } + } + } + + this._emit('runStepDelta', event.data.delta, accumulatedRunStep); + break; + case 'thread.run.step.completed': + case 'thread.run.step.failed': + case 'thread.run.step.cancelled': + case 'thread.run.step.expired': + this.#currentRunStepSnapshot = undefined; + const details = event.data.step_details; + if (details.type == 'tool_calls') { + if (this.#currentToolCall) { + this._emit('toolCallDone', this.#currentToolCall as ToolCall); + this.#currentToolCall = undefined; + } + } + this._emit('runStepDone', event.data, accumulatedRunStep); + break; + case 'thread.run.step.in_progress': + break; + } + } + + #handleEvent(event: AssistantStreamEvent) { + this.#events.push(event); + this._emit('event', event); + } + + #accumulateRunStep(event: RunStepStreamEvent): Runs.RunStep { + switch (event.event) { + case 'thread.run.step.created': + this.#runStepSnapshots[event.data.id] = event.data; + return event.data; + + case 'thread.run.step.delta': + let snapshot = this.#runStepSnapshots[event.data.id] as Runs.RunStep; + if (!snapshot) { + throw Error('Received a RunStepDelta before creation of a snapshot'); + } + + let data = event.data; + + if (data.delta) { + const accumulated 
= AssistantStream.accumulateDelta(snapshot, data.delta) as Runs.RunStep; + this.#runStepSnapshots[event.data.id] = accumulated; + } + + return this.#runStepSnapshots[event.data.id] as Runs.RunStep; + + case 'thread.run.step.completed': + case 'thread.run.step.failed': + case 'thread.run.step.cancelled': + case 'thread.run.step.expired': + case 'thread.run.step.in_progress': + this.#runStepSnapshots[event.data.id] = event.data; + break; + } + + if (this.#runStepSnapshots[event.data.id]) return this.#runStepSnapshots[event.data.id] as Runs.RunStep; + throw new Error('No snapshot available'); + } + + #accumulateMessage( + event: AssistantStreamEvent, + snapshot: Message | undefined, + ): [Message, MessageContentDelta[]] { + let newContent: MessageContentDelta[] = []; + + switch (event.event) { + case 'thread.message.created': + //On creation the snapshot is just the initial message + return [event.data, newContent]; + + case 'thread.message.delta': + if (!snapshot) { + throw Error( + 'Received a delta with no existing snapshot (there should be one from message creation)', + ); + } + + let data = event.data; + + //If this delta does not have content, nothing to process + if (data.delta.content) { + for (const contentElement of data.delta.content) { + if (contentElement.index in snapshot.content) { + let currentContent = snapshot.content[contentElement.index]; + snapshot.content[contentElement.index] = this.#accumulateContent( + contentElement, + currentContent, + ); + } else { + snapshot.content[contentElement.index] = contentElement as + | TextContentBlock + | ImageFileContentBlock; + //This is a new element + newContent.push(contentElement); + } + } + } + + return [snapshot, newContent]; + + case 'thread.message.in_progress': + case 'thread.message.completed': + case 'thread.message.incomplete': + //No changes on other thread events + if (snapshot) { + return [snapshot, newContent]; + } else { + throw Error('Received thread message event with no existing snapshot'); 
+ } + } + throw Error('Tried to accumulate a non-message event'); + } + + #accumulateContent( + contentElement: MessageContentDelta, + currentContent: TextContentBlock | ImageFileContentBlock | undefined, + ): TextContentBlock | ImageFileContentBlock { + return AssistantStream.accumulateDelta(currentContent as unknown as Record, contentElement) as + | TextContentBlock + | ImageFileContentBlock; + } + + static accumulateDelta(acc: Record, delta: Record): Record { + for (const [key, deltaValue] of Object.entries(delta)) { + if (!acc.hasOwnProperty(key)) { + acc[key] = deltaValue; + continue; + } + + let accValue = acc[key]; + if (accValue === null || accValue === undefined) { + acc[key] = deltaValue; + continue; + } + + // We don't accumulate these special properties + if (key === 'index' || key === 'type') { + acc[key] = deltaValue; + continue; + } + + // Type-specific accumulation logic + if (typeof accValue === 'string' && typeof deltaValue === 'string') { + accValue += deltaValue; + } else if (typeof accValue === 'number' && typeof deltaValue === 'number') { + accValue += deltaValue; + } else if (Core.isObj(accValue) && Core.isObj(deltaValue)) { + accValue = this.accumulateDelta(accValue as Record, deltaValue as Record); + } else if (Array.isArray(accValue) && Array.isArray(deltaValue)) { + if (accValue.every((x) => typeof x === 'string' || typeof x === 'number')) { + accValue.push(...deltaValue); // Use spread syntax for efficient addition + continue; + } + } else { + throw Error(`Unhandled record type: ${key}, deltaValue: ${deltaValue}, accValue: ${accValue}`); + } + acc[key] = accValue; + } + + return acc; + } + + #handleRun(event: RunStreamEvent) { + this.#currentRunSnapshot = event.data; + switch (event.event) { + case 'thread.run.created': + break; + case 'thread.run.queued': + break; + case 'thread.run.in_progress': + break; + case 'thread.run.requires_action': + case 'thread.run.cancelled': + case 'thread.run.failed': + case 'thread.run.completed': + case 
'thread.run.expired': + this.#finalRun = event.data; + if (this.#currentToolCall) { + this._emit('toolCallDone', this.#currentToolCall); + this.#currentToolCall = undefined; + } + break; + case 'thread.run.cancelling': + break; + } + } +} diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants/assistants.ts index 08abb2c91..b4e92fd92 100644 --- a/src/resources/beta/assistants/assistants.ts +++ b/src/resources/beta/assistants/assistants.ts @@ -6,6 +6,10 @@ import { isRequestOptions } from 'openai/core'; import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; import * as Shared from 'openai/resources/shared'; import * as FilesAPI from 'openai/resources/beta/assistants/files'; +import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; +import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages'; +import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; +import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; import { CursorPage, type CursorPageParams } from 'openai/pagination'; export class Assistants extends APIResource { @@ -145,40 +149,777 @@ export interface Assistant { * A list of tool enabled on the assistant. There can be a maximum of 128 tools per * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. */ - tools: Array; + tools: Array; } -export namespace Assistant { - export interface CodeInterpreter { +export interface AssistantDeleted { + id: string; + + deleted: boolean; + + object: 'assistant.deleted'; +} + +/** + * Represents an event emitted when streaming a Run. + * + * Each event in a server-sent events stream has an `event` and `data` property: + * + * ``` + * event: thread.created + * data: {"id": "thread_123", "object": "thread", ...} + * ``` + * + * We emit events whenever a new object is created, transitions to a new state, or + * is being streamed in parts (deltas). 
For example, we emit `thread.run.created` + * when a new run is created, `thread.run.completed` when a run completes, and so + * on. When an Assistant chooses to create a message during a run, we emit a + * `thread.message.created event`, a `thread.message.in_progress` event, many + * `thread.message.delta` events, and finally a `thread.message.completed` event. + * + * We may add additional events over time, so we recommend handling unknown events + * gracefully in your code. See the + * [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) + * to learn how to integrate the Assistants API with streaming. + */ +export type AssistantStreamEvent = + | AssistantStreamEvent.ThreadCreated + | AssistantStreamEvent.ThreadRunCreated + | AssistantStreamEvent.ThreadRunQueued + | AssistantStreamEvent.ThreadRunInProgress + | AssistantStreamEvent.ThreadRunRequiresAction + | AssistantStreamEvent.ThreadRunCompleted + | AssistantStreamEvent.ThreadRunFailed + | AssistantStreamEvent.ThreadRunCancelling + | AssistantStreamEvent.ThreadRunCancelled + | AssistantStreamEvent.ThreadRunExpired + | AssistantStreamEvent.ThreadRunStepCreated + | AssistantStreamEvent.ThreadRunStepInProgress + | AssistantStreamEvent.ThreadRunStepDelta + | AssistantStreamEvent.ThreadRunStepCompleted + | AssistantStreamEvent.ThreadRunStepFailed + | AssistantStreamEvent.ThreadRunStepCancelled + | AssistantStreamEvent.ThreadRunStepExpired + | AssistantStreamEvent.ThreadMessageCreated + | AssistantStreamEvent.ThreadMessageInProgress + | AssistantStreamEvent.ThreadMessageDelta + | AssistantStreamEvent.ThreadMessageCompleted + | AssistantStreamEvent.ThreadMessageIncomplete + | AssistantStreamEvent.ErrorEvent; + +export namespace AssistantStreamEvent { + /** + * Occurs when a new + * [thread](https://platform.openai.com/docs/api-reference/threads/object) is + * created. 
+ */ + export interface ThreadCreated { + /** + * Represents a thread that contains + * [messages](https://platform.openai.com/docs/api-reference/messages). + */ + data: ThreadsAPI.Thread; + + event: 'thread.created'; + } + + /** + * Occurs when a new + * [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + */ + export interface ThreadRunCreated { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.created'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * moves to a `queued` status. + */ + export interface ThreadRunQueued { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.queued'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * moves to an `in_progress` status. + */ + export interface ThreadRunInProgress { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.in_progress'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * moves to a `requires_action` status. + */ + export interface ThreadRunRequiresAction { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.requires_action'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * is completed. + */ + export interface ThreadRunCompleted { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). 
+ */ + data: RunsAPI.Run; + + event: 'thread.run.completed'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * fails. + */ + export interface ThreadRunFailed { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.failed'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * moves to a `cancelling` status. + */ + export interface ThreadRunCancelling { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.cancelling'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * is cancelled. + */ + export interface ThreadRunCancelled { /** - * The type of tool being defined: `code_interpreter` + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). */ - type: 'code_interpreter'; + data: RunsAPI.Run; + + event: 'thread.run.cancelled'; } - export interface Retrieval { + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * expires. + */ + export interface ThreadRunExpired { /** - * The type of tool being defined: `retrieval` + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). */ - type: 'retrieval'; + data: RunsAPI.Run; + + event: 'thread.run.expired'; } - export interface Function { - function: Shared.FunctionDefinition; + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is + * created. + */ + export interface ThreadRunStepCreated { + /** + * Represents a step in execution of a run. 
+ */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.created'; + } + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * moves to an `in_progress` state. + */ + export interface ThreadRunStepInProgress { /** - * The type of tool being defined: `function` + * Represents a step in execution of a run. */ - type: 'function'; + data: StepsAPI.RunStep; + + event: 'thread.run.step.in_progress'; + } + + /** + * Occurs when parts of a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) are + * being streamed. + */ + export interface ThreadRunStepDelta { + /** + * Represents a run step delta i.e. any changed fields on a run step during + * streaming. + */ + data: StepsAPI.RunStepDeltaEvent; + + event: 'thread.run.step.delta'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is + * completed. + */ + export interface ThreadRunStepCompleted { + /** + * Represents a step in execution of a run. + */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.completed'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * fails. + */ + export interface ThreadRunStepFailed { + /** + * Represents a step in execution of a run. + */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.failed'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is + * cancelled. + */ + export interface ThreadRunStepCancelled { + /** + * Represents a step in execution of a run. + */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.cancelled'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * expires. + */ + export interface ThreadRunStepExpired { + /** + * Represents a step in execution of a run. 
+ */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.expired'; + } + + /** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) is + * created. + */ + export interface ThreadMessageCreated { + /** + * Represents a message within a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: MessagesAPI.Message; + + event: 'thread.message.created'; + } + + /** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) moves + * to an `in_progress` state. + */ + export interface ThreadMessageInProgress { + /** + * Represents a message within a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: MessagesAPI.Message; + + event: 'thread.message.in_progress'; + } + + /** + * Occurs when parts of a + * [Message](https://platform.openai.com/docs/api-reference/messages/object) are + * being streamed. + */ + export interface ThreadMessageDelta { + /** + * Represents a message delta i.e. any changed fields on a message during + * streaming. + */ + data: MessagesAPI.MessageDeltaEvent; + + event: 'thread.message.delta'; + } + + /** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) is + * completed. + */ + export interface ThreadMessageCompleted { + /** + * Represents a message within a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: MessagesAPI.Message; + + event: 'thread.message.completed'; + } + + /** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) ends + * before it is completed. + */ + export interface ThreadMessageIncomplete { + /** + * Represents a message within a + * [thread](https://platform.openai.com/docs/api-reference/threads). 
+ */ + data: MessagesAPI.Message; + + event: 'thread.message.incomplete'; + } + + /** + * Occurs when an + * [error](https://platform.openai.com/docs/guides/error-codes/api-errors) occurs. + * This can happen due to an internal server error or a timeout. + */ + export interface ErrorEvent { + data: Shared.ErrorObject; + + event: 'error'; } } -export interface AssistantDeleted { - id: string; +export type AssistantTool = CodeInterpreterTool | RetrievalTool | FunctionTool; - deleted: boolean; +export interface CodeInterpreterTool { + /** + * The type of tool being defined: `code_interpreter` + */ + type: 'code_interpreter'; +} - object: 'assistant.deleted'; +export interface FunctionTool { + function: Shared.FunctionDefinition; + + /** + * The type of tool being defined: `function` + */ + type: 'function'; +} + +/** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) is + * created. + */ +export type MessageStreamEvent = + | MessageStreamEvent.ThreadMessageCreated + | MessageStreamEvent.ThreadMessageInProgress + | MessageStreamEvent.ThreadMessageDelta + | MessageStreamEvent.ThreadMessageCompleted + | MessageStreamEvent.ThreadMessageIncomplete; + +export namespace MessageStreamEvent { + /** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) is + * created. + */ + export interface ThreadMessageCreated { + /** + * Represents a message within a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: MessagesAPI.Message; + + event: 'thread.message.created'; + } + + /** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) moves + * to an `in_progress` state. + */ + export interface ThreadMessageInProgress { + /** + * Represents a message within a + * [thread](https://platform.openai.com/docs/api-reference/threads). 
+ */ + data: MessagesAPI.Message; + + event: 'thread.message.in_progress'; + } + + /** + * Occurs when parts of a + * [Message](https://platform.openai.com/docs/api-reference/messages/object) are + * being streamed. + */ + export interface ThreadMessageDelta { + /** + * Represents a message delta i.e. any changed fields on a message during + * streaming. + */ + data: MessagesAPI.MessageDeltaEvent; + + event: 'thread.message.delta'; + } + + /** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) is + * completed. + */ + export interface ThreadMessageCompleted { + /** + * Represents a message within a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: MessagesAPI.Message; + + event: 'thread.message.completed'; + } + + /** + * Occurs when a + * [message](https://platform.openai.com/docs/api-reference/messages/object) ends + * before it is completed. + */ + export interface ThreadMessageIncomplete { + /** + * Represents a message within a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: MessagesAPI.Message; + + event: 'thread.message.incomplete'; + } +} + +export interface RetrievalTool { + /** + * The type of tool being defined: `retrieval` + */ + type: 'retrieval'; +} + +/** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is + * created. + */ +export type RunStepStreamEvent = + | RunStepStreamEvent.ThreadRunStepCreated + | RunStepStreamEvent.ThreadRunStepInProgress + | RunStepStreamEvent.ThreadRunStepDelta + | RunStepStreamEvent.ThreadRunStepCompleted + | RunStepStreamEvent.ThreadRunStepFailed + | RunStepStreamEvent.ThreadRunStepCancelled + | RunStepStreamEvent.ThreadRunStepExpired; + +export namespace RunStepStreamEvent { + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is + * created. 
+ */ + export interface ThreadRunStepCreated { + /** + * Represents a step in execution of a run. + */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.created'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * moves to an `in_progress` state. + */ + export interface ThreadRunStepInProgress { + /** + * Represents a step in execution of a run. + */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.in_progress'; + } + + /** + * Occurs when parts of a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) are + * being streamed. + */ + export interface ThreadRunStepDelta { + /** + * Represents a run step delta i.e. any changed fields on a run step during + * streaming. + */ + data: StepsAPI.RunStepDeltaEvent; + + event: 'thread.run.step.delta'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is + * completed. + */ + export interface ThreadRunStepCompleted { + /** + * Represents a step in execution of a run. + */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.completed'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * fails. + */ + export interface ThreadRunStepFailed { + /** + * Represents a step in execution of a run. + */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.failed'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is + * cancelled. + */ + export interface ThreadRunStepCancelled { + /** + * Represents a step in execution of a run. + */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.cancelled'; + } + + /** + * Occurs when a + * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * expires. + */ + export interface ThreadRunStepExpired { + /** + * Represents a step in execution of a run. 
+ */ + data: StepsAPI.RunStep; + + event: 'thread.run.step.expired'; + } +} + +/** + * Occurs when a new + * [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + */ +export type RunStreamEvent = + | RunStreamEvent.ThreadRunCreated + | RunStreamEvent.ThreadRunQueued + | RunStreamEvent.ThreadRunInProgress + | RunStreamEvent.ThreadRunRequiresAction + | RunStreamEvent.ThreadRunCompleted + | RunStreamEvent.ThreadRunFailed + | RunStreamEvent.ThreadRunCancelling + | RunStreamEvent.ThreadRunCancelled + | RunStreamEvent.ThreadRunExpired; + +export namespace RunStreamEvent { + /** + * Occurs when a new + * [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + */ + export interface ThreadRunCreated { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.created'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * moves to a `queued` status. + */ + export interface ThreadRunQueued { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.queued'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * moves to an `in_progress` status. + */ + export interface ThreadRunInProgress { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.in_progress'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * moves to a `requires_action` status. + */ + export interface ThreadRunRequiresAction { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). 
+ */ + data: RunsAPI.Run; + + event: 'thread.run.requires_action'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * is completed. + */ + export interface ThreadRunCompleted { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.completed'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * fails. + */ + export interface ThreadRunFailed { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.failed'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * moves to a `cancelling` status. + */ + export interface ThreadRunCancelling { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.cancelling'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * is cancelled. + */ + export interface ThreadRunCancelled { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.cancelled'; + } + + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * expires. + */ + export interface ThreadRunExpired { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.expired'; + } +} + +/** + * Occurs when a new + * [thread](https://platform.openai.com/docs/api-reference/threads/object) is + * created. 
+ */ +export interface ThreadStreamEvent { + /** + * Represents a thread that contains + * [messages](https://platform.openai.com/docs/api-reference/messages). + */ + data: ThreadsAPI.Thread; + + event: 'thread.created'; } export interface AssistantCreateParams { @@ -226,36 +967,7 @@ export interface AssistantCreateParams { * A list of tool enabled on the assistant. There can be a maximum of 128 tools per * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. */ - tools?: Array< - | AssistantCreateParams.AssistantToolsCode - | AssistantCreateParams.AssistantToolsRetrieval - | AssistantCreateParams.AssistantToolsFunction - >; -} - -export namespace AssistantCreateParams { - export interface AssistantToolsCode { - /** - * The type of tool being defined: `code_interpreter` - */ - type: 'code_interpreter'; - } - - export interface AssistantToolsRetrieval { - /** - * The type of tool being defined: `retrieval` - */ - type: 'retrieval'; - } - - export interface AssistantToolsFunction { - function: Shared.FunctionDefinition; - - /** - * The type of tool being defined: `function` - */ - type: 'function'; - } + tools?: Array; } export interface AssistantUpdateParams { @@ -305,36 +1017,7 @@ export interface AssistantUpdateParams { * A list of tool enabled on the assistant. There can be a maximum of 128 tools per * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. 
*/ - tools?: Array< - | AssistantUpdateParams.AssistantToolsCode - | AssistantUpdateParams.AssistantToolsRetrieval - | AssistantUpdateParams.AssistantToolsFunction - >; -} - -export namespace AssistantUpdateParams { - export interface AssistantToolsCode { - /** - * The type of tool being defined: `code_interpreter` - */ - type: 'code_interpreter'; - } - - export interface AssistantToolsRetrieval { - /** - * The type of tool being defined: `retrieval` - */ - type: 'retrieval'; - } - - export interface AssistantToolsFunction { - function: Shared.FunctionDefinition; - - /** - * The type of tool being defined: `function` - */ - type: 'function'; - } + tools?: Array; } export interface AssistantListParams extends CursorPageParams { @@ -356,6 +1039,15 @@ export interface AssistantListParams extends CursorPageParams { export namespace Assistants { export import Assistant = AssistantsAPI.Assistant; export import AssistantDeleted = AssistantsAPI.AssistantDeleted; + export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; + export import AssistantTool = AssistantsAPI.AssistantTool; + export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; + export import FunctionTool = AssistantsAPI.FunctionTool; + export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; + export import RetrievalTool = AssistantsAPI.RetrievalTool; + export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; + export import RunStreamEvent = AssistantsAPI.RunStreamEvent; + export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; export import AssistantsPage = AssistantsAPI.AssistantsPage; export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams; export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; diff --git a/src/resources/beta/assistants/index.ts b/src/resources/beta/assistants/index.ts index 5236bc8de..0ae8c9c67 100644 --- a/src/resources/beta/assistants/index.ts +++ b/src/resources/beta/assistants/index.ts 
@@ -3,6 +3,15 @@ export { Assistant, AssistantDeleted, + AssistantStreamEvent, + AssistantTool, + CodeInterpreterTool, + FunctionTool, + MessageStreamEvent, + RetrievalTool, + RunStepStreamEvent, + RunStreamEvent, + ThreadStreamEvent, AssistantCreateParams, AssistantUpdateParams, AssistantListParams, diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 5fd99990d..74056ed1d 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -16,6 +16,15 @@ export namespace Beta { export import Assistants = AssistantsAPI.Assistants; export import Assistant = AssistantsAPI.Assistant; export import AssistantDeleted = AssistantsAPI.AssistantDeleted; + export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; + export import AssistantTool = AssistantsAPI.AssistantTool; + export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; + export import FunctionTool = AssistantsAPI.FunctionTool; + export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; + export import RetrievalTool = AssistantsAPI.RetrievalTool; + export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; + export import RunStreamEvent = AssistantsAPI.RunStreamEvent; + export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; export import AssistantsPage = AssistantsAPI.AssistantsPage; export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams; export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; @@ -26,4 +35,7 @@ export namespace Beta { export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams; export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; + export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; + export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; + export import ThreadCreateAndRunStreamParams = 
ThreadsAPI.ThreadCreateAndRunStreamParams; } diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 4ed7e84b1..d8770c29a 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -3,6 +3,15 @@ export { Assistant, AssistantDeleted, + AssistantStreamEvent, + AssistantTool, + CodeInterpreterTool, + FunctionTool, + MessageStreamEvent, + RetrievalTool, + RunStepStreamEvent, + RunStreamEvent, + ThreadStreamEvent, AssistantCreateParams, AssistantUpdateParams, AssistantListParams, @@ -17,5 +26,8 @@ export { ThreadCreateParams, ThreadUpdateParams, ThreadCreateAndRunParams, + ThreadCreateAndRunParamsNonStreaming, + ThreadCreateAndRunParamsStreaming, + ThreadCreateAndRunStreamParams, Threads, } from './threads/index'; diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 54a02dd03..3585be846 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -1,14 +1,30 @@ // File generated from our OpenAPI spec by Stainless. 
export { - MessageContentImageFile, - MessageContentText, - ThreadMessage, - ThreadMessageDeleted, + Annotation, + AnnotationDelta, + FileCitationAnnotation, + FileCitationDeltaAnnotation, + FilePathAnnotation, + FilePathDeltaAnnotation, + ImageFile, + ImageFileContentBlock, + ImageFileDelta, + ImageFileDeltaBlock, + Message, + MessageContent, + MessageContentDelta, + MessageDeleted, + MessageDelta, + MessageDeltaEvent, + Text, + TextContentBlock, + TextDelta, + TextDeltaBlock, MessageCreateParams, MessageUpdateParams, MessageListParams, - ThreadMessagesPage, + MessagesPage, Messages, } from './messages/index'; export { @@ -16,9 +32,15 @@ export { Run, RunStatus, RunCreateParams, + RunCreateParamsNonStreaming, + RunCreateParamsStreaming, RunUpdateParams, RunListParams, + RunCreateAndStreamParams, RunSubmitToolOutputsParams, + RunSubmitToolOutputsParamsNonStreaming, + RunSubmitToolOutputsParamsStreaming, + RunSubmitToolOutputsStreamParams, RunsPage, Runs, } from './runs/index'; @@ -28,5 +50,8 @@ export { ThreadCreateParams, ThreadUpdateParams, ThreadCreateAndRunParams, + ThreadCreateAndRunParamsNonStreaming, + ThreadCreateAndRunParamsStreaming, + ThreadCreateAndRunStreamParams, Threads, } from './threads'; diff --git a/src/resources/beta/threads/messages/index.ts b/src/resources/beta/threads/messages/index.ts index cde22c2a9..f68edbbd4 100644 --- a/src/resources/beta/threads/messages/index.ts +++ b/src/resources/beta/threads/messages/index.ts @@ -1,14 +1,30 @@ // File generated from our OpenAPI spec by Stainless. 
export { - MessageContentImageFile, - MessageContentText, - ThreadMessage, - ThreadMessageDeleted, + Annotation, + AnnotationDelta, + FileCitationAnnotation, + FileCitationDeltaAnnotation, + FilePathAnnotation, + FilePathDeltaAnnotation, + ImageFile, + ImageFileContentBlock, + ImageFileDelta, + ImageFileDeltaBlock, + Message, + MessageContent, + MessageContentDelta, + MessageDeleted, + MessageDelta, + MessageDeltaEvent, + Text, + TextContentBlock, + TextDelta, + TextDeltaBlock, MessageCreateParams, MessageUpdateParams, MessageListParams, - ThreadMessagesPage, + MessagesPage, Messages, } from './messages'; export { MessageFile, FileListParams, MessageFilesPage, Files } from './files'; diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts index 40b436829..b38a4bbf0 100644 --- a/src/resources/beta/threads/messages/messages.ts +++ b/src/resources/beta/threads/messages/messages.ts @@ -17,7 +17,7 @@ export class Messages extends APIResource { threadId: string, body: MessageCreateParams, options?: Core.RequestOptions, - ): Core.APIPromise { + ): Core.APIPromise { return this._client.post(`/threads/${threadId}/messages`, { body, ...options, @@ -28,11 +28,7 @@ export class Messages extends APIResource { /** * Retrieve a message. 
*/ - retrieve( - threadId: string, - messageId: string, - options?: Core.RequestOptions, - ): Core.APIPromise { + retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}/messages/${messageId}`, { ...options, headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, @@ -47,7 +43,7 @@ export class Messages extends APIResource { messageId: string, body: MessageUpdateParams, options?: Core.RequestOptions, - ): Core.APIPromise { + ): Core.APIPromise { return this._client.post(`/threads/${threadId}/messages/${messageId}`, { body, ...options, @@ -62,17 +58,17 @@ export class Messages extends APIResource { threadId: string, query?: MessageListParams, options?: Core.RequestOptions, - ): Core.PagePromise; - list(threadId: string, options?: Core.RequestOptions): Core.PagePromise; + ): Core.PagePromise; + list(threadId: string, options?: Core.RequestOptions): Core.PagePromise; list( threadId: string, query: MessageListParams | Core.RequestOptions = {}, options?: Core.RequestOptions, - ): Core.PagePromise { + ): Core.PagePromise { if (isRequestOptions(query)) { return this.list(threadId, {}, query); } - return this._client.getAPIList(`/threads/${threadId}/messages`, ThreadMessagesPage, { + return this._client.getAPIList(`/threads/${threadId}/messages`, MessagesPage, { query, ...options, headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, @@ -80,129 +76,220 @@ export class Messages extends APIResource { } } -export class ThreadMessagesPage extends CursorPage {} +export class MessagesPage extends CursorPage {} /** - * References an image [File](https://platform.openai.com/docs/api-reference/files) - * in the content of a message. + * A citation within the message that points to a specific quote from a specific + * File associated with the assistant or the message. Generated when the assistant + * uses the "retrieval" tool to search files. 
+ */ +export type Annotation = FileCitationAnnotation | FilePathAnnotation; + +/** + * A citation within the message that points to a specific quote from a specific + * File associated with the assistant or the message. Generated when the assistant + * uses the "retrieval" tool to search files. + */ +export type AnnotationDelta = FileCitationDeltaAnnotation | FilePathDeltaAnnotation; + +/** + * A citation within the message that points to a specific quote from a specific + * File associated with the assistant or the message. Generated when the assistant + * uses the "retrieval" tool to search files. */ -export interface MessageContentImageFile { - image_file: MessageContentImageFile.ImageFile; +export interface FileCitationAnnotation { + end_index: number; + + file_citation: FileCitationAnnotation.FileCitation; + + start_index: number; /** - * Always `image_file`. + * The text in the message content that needs to be replaced. */ - type: 'image_file'; + text: string; + + /** + * Always `file_citation`. + */ + type: 'file_citation'; } -export namespace MessageContentImageFile { - export interface ImageFile { +export namespace FileCitationAnnotation { + export interface FileCitation { /** - * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - * in the message content. + * The ID of the specific File the citation is from. */ file_id: string; + + /** + * The specific quote in the file. + */ + quote: string; } } /** - * The text content that is part of a message. + * A citation within the message that points to a specific quote from a specific + * File associated with the assistant or the message. Generated when the assistant + * uses the "retrieval" tool to search files. */ -export interface MessageContentText { - text: MessageContentText.Text; +export interface FileCitationDeltaAnnotation { + /** + * The index of the annotation in the text content part. + */ + index: number; /** - * Always `text`. + * Always `file_citation`. 
*/ - type: 'text'; + type: 'file_citation'; + + end_index?: number; + + file_citation?: FileCitationDeltaAnnotation.FileCitation; + + start_index?: number; + + /** + * The text in the message content that needs to be replaced. + */ + text?: string; } -export namespace MessageContentText { - export interface Text { - annotations: Array; +export namespace FileCitationDeltaAnnotation { + export interface FileCitation { + /** + * The ID of the specific File the citation is from. + */ + file_id?: string; /** - * The data that makes up the text. + * The specific quote in the file. */ - value: string; + quote?: string; } +} + +/** + * A URL for the file that's generated when the assistant used the + * `code_interpreter` tool to generate a file. + */ +export interface FilePathAnnotation { + end_index: number; + + file_path: FilePathAnnotation.FilePath; + + start_index: number; + + /** + * The text in the message content that needs to be replaced. + */ + text: string; + + /** + * Always `file_path`. + */ + type: 'file_path'; +} - export namespace Text { +export namespace FilePathAnnotation { + export interface FilePath { /** - * A citation within the message that points to a specific quote from a specific - * File associated with the assistant or the message. Generated when the assistant - * uses the "retrieval" tool to search files. + * The ID of the file that was generated. */ - export interface FileCitation { - end_index: number; + file_id: string; + } +} + +/** + * A URL for the file that's generated when the assistant used the + * `code_interpreter` tool to generate a file. + */ +export interface FilePathDeltaAnnotation { + /** + * The index of the annotation in the text content part. + */ + index: number; - file_citation: FileCitation.FileCitation; + /** + * Always `file_path`. + */ + type: 'file_path'; - start_index: number; + end_index?: number; - /** - * The text in the message content that needs to be replaced. 
- */ - text: string; + file_path?: FilePathDeltaAnnotation.FilePath; - /** - * Always `file_citation`. - */ - type: 'file_citation'; - } + start_index?: number; - export namespace FileCitation { - export interface FileCitation { - /** - * The ID of the specific File the citation is from. - */ - file_id: string; - - /** - * The specific quote in the file. - */ - quote: string; - } - } + /** + * The text in the message content that needs to be replaced. + */ + text?: string; +} +export namespace FilePathDeltaAnnotation { + export interface FilePath { /** - * A URL for the file that's generated when the assistant used the - * `code_interpreter` tool to generate a file. + * The ID of the file that was generated. */ - export interface FilePath { - end_index: number; + file_id?: string; + } +} - file_path: FilePath.FilePath; +export interface ImageFile { + /** + * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + * in the message content. + */ + file_id: string; +} - start_index: number; +/** + * References an image [File](https://platform.openai.com/docs/api-reference/files) + * in the content of a message. + */ +export interface ImageFileContentBlock { + image_file: ImageFile; - /** - * The text in the message content that needs to be replaced. - */ - text: string; + /** + * Always `image_file`. + */ + type: 'image_file'; +} - /** - * Always `file_path`. - */ - type: 'file_path'; - } +export interface ImageFileDelta { + /** + * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + * in the message content. + */ + file_id?: string; +} - export namespace FilePath { - export interface FilePath { - /** - * The ID of the file that was generated. - */ - file_id: string; - } - } - } +/** + * References an image [File](https://platform.openai.com/docs/api-reference/files) + * in the content of a message. + */ +export interface ImageFileDeltaBlock { + /** + * The index of the content part in the message. 
+ */ + index: number; + + /** + * Always `image_file`. + */ + type: 'image_file'; + + image_file?: ImageFileDelta; } /** * Represents a message within a * [thread](https://platform.openai.com/docs/api-reference/threads). */ -export interface ThreadMessage { +export interface Message { /** * The identifier, which can be referenced in API endpoints. */ @@ -215,10 +302,15 @@ export interface ThreadMessage { */ assistant_id: string | null; + /** + * The Unix timestamp (in seconds) for when the message was completed. + */ + completed_at: number | null; + /** * The content of the message in array of text and/or images. */ - content: Array; + content: Array; /** * The Unix timestamp (in seconds) for when the message was created. @@ -232,6 +324,16 @@ export interface ThreadMessage { */ file_ids: Array; + /** + * The Unix timestamp (in seconds) for when the message was marked as incomplete. + */ + incomplete_at: number | null; + + /** + * On an incomplete message, details about why the message is incomplete. + */ + incomplete_details: Message.IncompleteDetails | null; + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful * for storing additional information about the object in a structured format. Keys @@ -257,6 +359,12 @@ export interface ThreadMessage { */ run_id: string | null; + /** + * The status of the message, which can be either `in_progress`, `incomplete`, or + * `completed`. + */ + status: 'in_progress' | 'incomplete' | 'completed'; + /** * The [thread](https://platform.openai.com/docs/api-reference/threads) ID that * this message belongs to. @@ -264,7 +372,31 @@ export interface ThreadMessage { thread_id: string; } -export interface ThreadMessageDeleted { +export namespace Message { + /** + * On an incomplete message, details about why the message is incomplete. + */ + export interface IncompleteDetails { + /** + * The reason the message is incomplete. 
+ */ + reason: 'content_filter' | 'max_tokens' | 'run_cancelled' | 'run_expired' | 'run_failed'; + } +} + +/** + * References an image [File](https://platform.openai.com/docs/api-reference/files) + * in the content of a message. + */ +export type MessageContent = ImageFileContentBlock | TextContentBlock; + +/** + * References an image [File](https://platform.openai.com/docs/api-reference/files) + * in the content of a message. + */ +export type MessageContentDelta = ImageFileDeltaBlock | TextDeltaBlock; + +export interface MessageDeleted { id: string; deleted: boolean; @@ -272,6 +404,96 @@ export interface ThreadMessageDeleted { object: 'thread.message.deleted'; } +/** + * The delta containing the fields that have changed on the Message. + */ +export interface MessageDelta { + /** + * The content of the message in array of text and/or images. + */ + content?: Array; + + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that + * the assistant should use. Useful for tools like retrieval and code_interpreter + * that can access files. A maximum of 10 files can be attached to a message. + */ + file_ids?: Array; + + /** + * The entity that produced the message. One of `user` or `assistant`. + */ + role?: 'user' | 'assistant'; +} + +/** + * Represents a message delta i.e. any changed fields on a message during + * streaming. + */ +export interface MessageDeltaEvent { + /** + * The identifier of the message, which can be referenced in API endpoints. + */ + id: string; + + /** + * The delta containing the fields that have changed on the Message. + */ + delta: MessageDelta; + + /** + * The object type, which is always `thread.message.delta`. + */ + object: 'thread.message.delta'; +} + +export interface Text { + annotations: Array; + + /** + * The data that makes up the text. + */ + value: string; +} + +/** + * The text content that is part of a message. + */ +export interface TextContentBlock { + text: Text; + + /** + * Always `text`. 
+ */ + type: 'text'; +} + +export interface TextDelta { + annotations?: Array; + + /** + * The data that makes up the text. + */ + value?: string; +} + +/** + * The text content that is part of a message. + */ +export interface TextDeltaBlock { + /** + * The index of the content part in the message. + */ + index: number; + + /** + * Always `text`. + */ + type: 'text'; + + text?: TextDelta; +} + export interface MessageCreateParams { /** * The content of the message. @@ -328,11 +550,27 @@ export interface MessageListParams extends CursorPageParams { } export namespace Messages { - export import MessageContentImageFile = MessagesAPI.MessageContentImageFile; - export import MessageContentText = MessagesAPI.MessageContentText; - export import ThreadMessage = MessagesAPI.ThreadMessage; - export import ThreadMessageDeleted = MessagesAPI.ThreadMessageDeleted; - export import ThreadMessagesPage = MessagesAPI.ThreadMessagesPage; + export import Annotation = MessagesAPI.Annotation; + export import AnnotationDelta = MessagesAPI.AnnotationDelta; + export import FileCitationAnnotation = MessagesAPI.FileCitationAnnotation; + export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation; + export import FilePathAnnotation = MessagesAPI.FilePathAnnotation; + export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation; + export import ImageFile = MessagesAPI.ImageFile; + export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; + export import ImageFileDelta = MessagesAPI.ImageFileDelta; + export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; + export import Message = MessagesAPI.Message; + export import MessageContent = MessagesAPI.MessageContent; + export import MessageContentDelta = MessagesAPI.MessageContentDelta; + export import MessageDeleted = MessagesAPI.MessageDeleted; + export import MessageDelta = MessagesAPI.MessageDelta; + export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; + export import 
Text = MessagesAPI.Text; + export import TextContentBlock = MessagesAPI.TextContentBlock; + export import TextDelta = MessagesAPI.TextDelta; + export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; + export import MessagesPage = MessagesAPI.MessagesPage; export import MessageCreateParams = MessagesAPI.MessageCreateParams; export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; export import MessageListParams = MessagesAPI.MessageListParams; diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index b11736c5c..7fa34637a 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -1,11 +1,22 @@ // File generated from our OpenAPI spec by Stainless. export { - CodeToolCall, + CodeInterpreterLogs, + CodeInterpreterOutputImage, + CodeInterpreterToolCall, + CodeInterpreterToolCallDelta, FunctionToolCall, + FunctionToolCallDelta, MessageCreationStepDetails, RetrievalToolCall, + RetrievalToolCallDelta, RunStep, + RunStepDelta, + RunStepDeltaEvent, + RunStepDeltaMessageDelta, + ToolCall, + ToolCallDelta, + ToolCallDeltaObject, ToolCallsStepDetails, StepListParams, RunStepsPage, @@ -16,9 +27,15 @@ export { Run, RunStatus, RunCreateParams, + RunCreateParamsNonStreaming, + RunCreateParamsStreaming, RunUpdateParams, RunListParams, + RunCreateAndStreamParams, RunSubmitToolOutputsParams, + RunSubmitToolOutputsParamsNonStreaming, + RunSubmitToolOutputsParamsStreaming, + RunSubmitToolOutputsStreamParams, RunsPage, Runs, } from './runs'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 9a0bc00dd..8fe09ecc6 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -1,12 +1,16 @@ // File generated from our OpenAPI spec by Stainless. 
import * as Core from 'openai/core'; +import { APIPromise } from 'openai/core'; import { APIResource } from 'openai/resource'; import { isRequestOptions } from 'openai/core'; +import { AssistantStream, RunCreateParamsBaseStream } from 'openai/lib/AssistantStream'; +import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream'; import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; -import * as Shared from 'openai/resources/shared'; +import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import { Stream } from 'openai/streaming'; export class Runs extends APIResource { steps: StepsAPI.Steps = new StepsAPI.Steps(this._client); @@ -14,12 +18,28 @@ export class Runs extends APIResource { /** * Create a run. */ - create(threadId: string, body: RunCreateParams, options?: Core.RequestOptions): Core.APIPromise { + create(threadId: string, body: RunCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; + create( + threadId: string, + body: RunCreateParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + create( + threadId: string, + body: RunCreateParamsBase, + options?: Core.RequestOptions, + ): APIPromise | Run>; + create( + threadId: string, + body: RunCreateParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { return this._client.post(`/threads/${threadId}/runs`, { body, ...options, headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); + stream: body.stream ?? 
false, + }) as APIPromise | APIPromise>; } /** @@ -82,23 +102,72 @@ export class Runs extends APIResource { }); } + /** + * Create a Run stream + */ + createAndStream( + threadId: string, + body: RunCreateParamsBaseStream, + options?: Core.RequestOptions, + ): AssistantStream { + return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options); + } + /** * When a run has the `status: "requires_action"` and `required_action.type` is * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the * tool calls once they're all completed. All outputs must be submitted in a single * request. */ + submitToolOutputs( + threadId: string, + runId: string, + body: RunSubmitToolOutputsParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise; + submitToolOutputs( + threadId: string, + runId: string, + body: RunSubmitToolOutputsParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + submitToolOutputs( + threadId: string, + runId: string, + body: RunSubmitToolOutputsParamsBase, + options?: Core.RequestOptions, + ): APIPromise | Run>; submitToolOutputs( threadId: string, runId: string, body: RunSubmitToolOutputsParams, options?: Core.RequestOptions, - ): Core.APIPromise { + ): APIPromise | APIPromise> { return this._client.post(`/threads/${threadId}/runs/${runId}/submit_tool_outputs`, { body, ...options, headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); + stream: body.stream ?? false, + }) as APIPromise | APIPromise>; + } + + /** + * Submit the tool outputs from a previous run and stream the run to a terminal + * state. 
+ */ + submitToolOutputsStream( + threadId: string, + runId: string, + body: RunSubmitToolOutputsParamsStream, + options?: Core.RequestOptions, + ): AssistantStream { + return AssistantStream.createToolAssistantStream( + threadId, + runId, + this._client.beta.threads.runs, + body, + options, + ); } } @@ -180,7 +249,7 @@ export interface Run { /** * The Unix timestamp (in seconds) for when the run will expire. */ - expires_at: number; + expires_at: number | null; /** * The Unix timestamp (in seconds) for when the run failed. @@ -255,7 +324,7 @@ export interface Run { * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for * this run. */ - tools: Array; + tools: Array; /** * Usage statistics related to the run. This value will be `null` if the run is not @@ -308,29 +377,6 @@ export namespace Run { } } - export interface AssistantToolsCode { - /** - * The type of tool being defined: `code_interpreter` - */ - type: 'code_interpreter'; - } - - export interface AssistantToolsRetrieval { - /** - * The type of tool being defined: `retrieval` - */ - type: 'retrieval'; - } - - export interface AssistantToolsFunction { - function: Shared.FunctionDefinition; - - /** - * The type of tool being defined: `function` - */ - type: 'function'; - } - /** * Usage statistics related to the run. This value will be `null` if the run is not * in a terminal state (i.e. `in_progress`, `queued`, etc.). 
@@ -368,7 +414,9 @@ export type RunStatus = | 'completed' | 'expired'; -export interface RunCreateParams { +export type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStreaming; + +export interface RunCreateParamsBase { /** * The ID of the * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to @@ -406,40 +454,41 @@ export interface RunCreateParams { */ model?: string | null; + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. + */ + stream?: boolean | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. */ - tools?: Array< - | RunCreateParams.AssistantToolsCode - | RunCreateParams.AssistantToolsRetrieval - | RunCreateParams.AssistantToolsFunction - > | null; + tools?: Array | null; } export namespace RunCreateParams { - export interface AssistantToolsCode { - /** - * The type of tool being defined: `code_interpreter` - */ - type: 'code_interpreter'; - } - - export interface AssistantToolsRetrieval { - /** - * The type of tool being defined: `retrieval` - */ - type: 'retrieval'; - } + export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; + export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; +} - export interface AssistantToolsFunction { - function: Shared.FunctionDefinition; +export interface RunCreateParamsNonStreaming extends RunCreateParamsBase { + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. 
+ */ + stream?: false | null; +} - /** - * The type of tool being defined: `function` - */ - type: 'function'; - } +export interface RunCreateParamsStreaming extends RunCreateParamsBase { + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. + */ + stream: true; } export interface RunUpdateParams { @@ -468,11 +517,67 @@ export interface RunListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export interface RunSubmitToolOutputsParams { +export interface RunCreateAndStreamParams { + /** + * The ID of the + * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + * execute this run. + */ + assistant_id: string; + + /** + * Appends additional instructions at the end of the instructions for the run. This + * is useful for modifying the behavior on a per-run basis without overriding other + * instructions. + */ + additional_instructions?: string | null; + + /** + * Overrides the + * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + * of the assistant. This is useful for modifying the behavior on a per-run basis. + */ + instructions?: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + * be used to execute this run. If a value is provided here, it will override the + * model associated with the assistant. If not, the model associated with the + * assistant will be used. + */ + model?: string | null; + + /** + * Override the tools the assistant can use for this run. 
This is useful for + * modifying the behavior on a per-run basis. + */ + tools?: Array | null; +} + +export type RunSubmitToolOutputsParams = + | RunSubmitToolOutputsParamsNonStreaming + | RunSubmitToolOutputsParamsStreaming; + +export interface RunSubmitToolOutputsParamsBase { /** * A list of tools for which the outputs are being submitted. */ tool_outputs: Array; + + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. + */ + stream?: boolean | null; } export namespace RunSubmitToolOutputsParams { @@ -488,6 +593,49 @@ export namespace RunSubmitToolOutputsParams { */ tool_call_id?: string; } + + export type RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; + export type RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; +} + +export interface RunSubmitToolOutputsParamsNonStreaming extends RunSubmitToolOutputsParamsBase { + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. + */ + stream?: false | null; +} + +export interface RunSubmitToolOutputsParamsStreaming extends RunSubmitToolOutputsParamsBase { + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. + */ + stream: true; +} + +export interface RunSubmitToolOutputsStreamParams { + /** + * A list of tools for which the outputs are being submitted. + */ + tool_outputs: Array; +} + +export namespace RunSubmitToolOutputsStreamParams { + export interface ToolOutput { + /** + * The output of the tool call to be submitted to continue the run. 
+ */ + output?: string; + + /** + * The ID of the tool call in the `required_action` object within the run object + * the output is being submitted for. + */ + tool_call_id?: string; + } } export namespace Runs { @@ -496,15 +644,32 @@ export namespace Runs { export import RunStatus = RunsAPI.RunStatus; export import RunsPage = RunsAPI.RunsPage; export import RunCreateParams = RunsAPI.RunCreateParams; + export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; + export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; export import RunUpdateParams = RunsAPI.RunUpdateParams; export import RunListParams = RunsAPI.RunListParams; + export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; + export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; + export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; + export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; export import Steps = StepsAPI.Steps; - export import CodeToolCall = StepsAPI.CodeToolCall; + export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; + export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; + export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; + export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; export import FunctionToolCall = StepsAPI.FunctionToolCall; + export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; export import RetrievalToolCall = StepsAPI.RetrievalToolCall; + export import RetrievalToolCallDelta = StepsAPI.RetrievalToolCallDelta; export import RunStep = StepsAPI.RunStep; + export import RunStepDelta = StepsAPI.RunStepDelta; + export import 
RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; + export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; + export import ToolCall = StepsAPI.ToolCall; + export import ToolCallDelta = StepsAPI.ToolCallDelta; + export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; export import RunStepsPage = StepsAPI.RunStepsPage; export import StepListParams = StepsAPI.StepListParams; diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index c574c94d1..4218e9769 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -55,10 +55,54 @@ export class Steps extends APIResource { export class RunStepsPage extends CursorPage {} +/** + * Text output from the Code Interpreter tool call as part of a run step. + */ +export interface CodeInterpreterLogs { + /** + * The index of the output in the outputs array. + */ + index: number; + + /** + * Always `logs`. + */ + type: 'logs'; + + /** + * The text output from the Code Interpreter tool call. + */ + logs?: string; +} + +export interface CodeInterpreterOutputImage { + /** + * The index of the output in the outputs array. + */ + index: number; + + /** + * Always `image`. + */ + type: 'image'; + + image?: CodeInterpreterOutputImage.Image; +} + +export namespace CodeInterpreterOutputImage { + export interface Image { + /** + * The [file](https://platform.openai.com/docs/api-reference/files) ID of the + * image. + */ + file_id?: string; + } +} + /** * Details of the Code Interpreter tool call the run step was involved in. */ -export interface CodeToolCall { +export interface CodeInterpreterToolCall { /** * The ID of the tool call. */ @@ -67,7 +111,7 @@ export interface CodeToolCall { /** * The Code Interpreter tool call definition. 
*/ - code_interpreter: CodeToolCall.CodeInterpreter; + code_interpreter: CodeInterpreterToolCall.CodeInterpreter; /** * The type of tool call. This is always going to be `code_interpreter` for this @@ -76,7 +120,7 @@ export interface CodeToolCall { type: 'code_interpreter'; } -export namespace CodeToolCall { +export namespace CodeInterpreterToolCall { /** * The Code Interpreter tool call definition. */ @@ -131,6 +175,51 @@ export namespace CodeToolCall { } } +/** + * Details of the Code Interpreter tool call the run step was involved in. + */ +export interface CodeInterpreterToolCallDelta { + /** + * The index of the tool call in the tool calls array. + */ + index: number; + + /** + * The type of tool call. This is always going to be `code_interpreter` for this + * type of tool call. + */ + type: 'code_interpreter'; + + /** + * The ID of the tool call. + */ + id?: string; + + /** + * The Code Interpreter tool call definition. + */ + code_interpreter?: CodeInterpreterToolCallDelta.CodeInterpreter; +} + +export namespace CodeInterpreterToolCallDelta { + /** + * The Code Interpreter tool call definition. + */ + export interface CodeInterpreter { + /** + * The input to the Code Interpreter tool call. + */ + input?: string; + + /** + * The outputs from the Code Interpreter tool call. Code Interpreter can output one + * or more items, including text (`logs`) or images (`image`). Each of these are + * represented by a different object type. + */ + outputs?: Array; + } +} + export interface FunctionToolCall { /** * The ID of the tool call object. @@ -173,6 +262,53 @@ export namespace FunctionToolCall { } } +export interface FunctionToolCallDelta { + /** + * The index of the tool call in the tool calls array. + */ + index: number; + + /** + * The type of tool call. This is always going to be `function` for this type of + * tool call. + */ + type: 'function'; + + /** + * The ID of the tool call object. 
+ */ + id?: string; + + /** + * The definition of the function that was called. + */ + function?: FunctionToolCallDelta.Function; +} + +export namespace FunctionToolCallDelta { + /** + * The definition of the function that was called. + */ + export interface Function { + /** + * The arguments passed to the function. + */ + arguments?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * The output of the function. This will be `null` if the outputs have not been + * [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + * yet. + */ + output?: string | null; + } +} + /** * Details of the message creation by the run step. */ @@ -212,6 +348,29 @@ export interface RetrievalToolCall { type: 'retrieval'; } +export interface RetrievalToolCallDelta { + /** + * The index of the tool call in the tool calls array. + */ + index: number; + + /** + * The type of tool call. This is always going to be `retrieval` for this type of + * tool call. + */ + type: 'retrieval'; + + /** + * The ID of the tool call object. + */ + id?: string; + + /** + * For now, this is always going to be an empty object. + */ + retrieval?: unknown; +} + /** * Represents a step in execution of a run. */ @@ -347,6 +506,85 @@ export namespace RunStep { } } +/** + * The delta containing the fields that have changed on the run step. + */ +export interface RunStepDelta { + /** + * The details of the run step. + */ + step_details?: RunStepDeltaMessageDelta | ToolCallDeltaObject; +} + +/** + * Represents a run step delta i.e. any changed fields on a run step during + * streaming. + */ +export interface RunStepDeltaEvent { + /** + * The identifier of the run step, which can be referenced in API endpoints. + */ + id: string; + + /** + * The delta containing the fields that have changed on the run step. + */ + delta: RunStepDelta; + + /** + * The object type, which is always `thread.run.step.delta`. 
+ */ + object: 'thread.run.step.delta'; +} + +/** + * Details of the message creation by the run step. + */ +export interface RunStepDeltaMessageDelta { + /** + * Always `message_creation`. + */ + type: 'message_creation'; + + message_creation?: RunStepDeltaMessageDelta.MessageCreation; +} + +export namespace RunStepDeltaMessageDelta { + export interface MessageCreation { + /** + * The ID of the message that was created by this run step. + */ + message_id?: string; + } +} + +/** + * Details of the Code Interpreter tool call the run step was involved in. + */ +export type ToolCall = CodeInterpreterToolCall | RetrievalToolCall | FunctionToolCall; + +/** + * Details of the Code Interpreter tool call the run step was involved in. + */ +export type ToolCallDelta = CodeInterpreterToolCallDelta | RetrievalToolCallDelta | FunctionToolCallDelta; + +/** + * Details of the tool call. + */ +export interface ToolCallDeltaObject { + /** + * Always `tool_calls`. + */ + type: 'tool_calls'; + + /** + * An array of tool calls the run step was involved in. These can be associated + * with one of three types of tools: `code_interpreter`, `retrieval`, or + * `function`. + */ + tool_calls?: Array; +} + /** * Details of the tool call. */ @@ -356,7 +594,7 @@ export interface ToolCallsStepDetails { * with one of three types of tools: `code_interpreter`, `retrieval`, or * `function`. */ - tool_calls: Array; + tool_calls: Array; /** * Always `tool_calls`. 
@@ -381,11 +619,22 @@ export interface StepListParams extends CursorPageParams { } export namespace Steps { - export import CodeToolCall = StepsAPI.CodeToolCall; + export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; + export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; + export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; + export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; export import FunctionToolCall = StepsAPI.FunctionToolCall; + export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; export import RetrievalToolCall = StepsAPI.RetrievalToolCall; + export import RetrievalToolCallDelta = StepsAPI.RetrievalToolCallDelta; export import RunStep = StepsAPI.RunStep; + export import RunStepDelta = StepsAPI.RunStepDelta; + export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; + export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; + export import ToolCall = StepsAPI.ToolCall; + export import ToolCallDelta = StepsAPI.ToolCallDelta; + export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; export import RunStepsPage = StepsAPI.RunStepsPage; export import StepListParams = StepsAPI.StepListParams; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 5aa1f8c25..cbde41f89 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -1,12 +1,15 @@ // File generated from our OpenAPI spec by Stainless. 
import * as Core from 'openai/core'; +import { APIPromise } from 'openai/core'; import { APIResource } from 'openai/resource'; import { isRequestOptions } from 'openai/core'; +import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from 'openai/lib/AssistantStream'; import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; -import * as Shared from 'openai/resources/shared'; +import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages'; import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; +import { Stream } from 'openai/streaming'; export class Threads extends APIResource { runs: RunsAPI.Runs = new RunsAPI.Runs(this._client); @@ -65,12 +68,38 @@ export class Threads extends APIResource { /** * Create a thread and run it in one request. */ - createAndRun(body: ThreadCreateAndRunParams, options?: Core.RequestOptions): Core.APIPromise { + createAndRun( + body: ThreadCreateAndRunParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise; + createAndRun( + body: ThreadCreateAndRunParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + createAndRun( + body: ThreadCreateAndRunParamsBase, + options?: Core.RequestOptions, + ): APIPromise | RunsAPI.Run>; + createAndRun( + body: ThreadCreateAndRunParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { return this._client.post('/threads/runs', { body, ...options, headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); + stream: body.stream ?? 
false, + }) as APIPromise | APIPromise>; + } + + /** + * Create a thread and stream the run back + */ + createAndRunStream( + body: ThreadCreateAndRunParamsBaseStream, + options?: Core.RequestOptions, + ): AssistantStream { + return AssistantStream.createThreadAssistantStream(body, this._client.beta.threads, options); } } @@ -168,7 +197,11 @@ export interface ThreadUpdateParams { metadata?: unknown | null; } -export interface ThreadCreateAndRunParams { +export type ThreadCreateAndRunParams = + | ThreadCreateAndRunParamsNonStreaming + | ThreadCreateAndRunParamsStreaming; + +export interface ThreadCreateAndRunParamsBase { /** * The ID of the * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to @@ -198,6 +231,13 @@ export interface ThreadCreateAndRunParams { */ model?: string | null; + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. + */ + stream?: boolean | null; + /** * If no thread is provided, an empty thread will be created. */ @@ -208,9 +248,7 @@ export interface ThreadCreateAndRunParams { * modifying the behavior on a per-run basis. 
*/ tools?: Array< - | ThreadCreateAndRunParams.AssistantToolsCode - | ThreadCreateAndRunParams.AssistantToolsRetrieval - | ThreadCreateAndRunParams.AssistantToolsFunction + AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool > | null; } @@ -265,27 +303,121 @@ export namespace ThreadCreateAndRunParams { } } - export interface AssistantToolsCode { + export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; + export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; +} + +export interface ThreadCreateAndRunParamsNonStreaming extends ThreadCreateAndRunParamsBase { + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. + */ + stream?: false | null; +} + +export interface ThreadCreateAndRunParamsStreaming extends ThreadCreateAndRunParamsBase { + /** + * If `true`, returns a stream of events that happen during the Run as server-sent + * events, terminating when the Run enters a terminal state with a `data: [DONE]` + * message. + */ + stream: true; +} + +export interface ThreadCreateAndRunStreamParams { + /** + * The ID of the + * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + * execute this run. + */ + assistant_id: string; + + /** + * Override the default system message of the assistant. This is useful for + * modifying the behavior on a per-run basis. + */ + instructions?: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. 
+ */ + metadata?: unknown | null; + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + * be used to execute this run. If a value is provided here, it will override the + * model associated with the assistant. If not, the model associated with the + * assistant will be used. + */ + model?: string | null; + + /** + * If no thread is provided, an empty thread will be created. + */ + thread?: ThreadCreateAndRunStreamParams.Thread; + + /** + * Override the tools the assistant can use for this run. This is useful for + * modifying the behavior on a per-run basis. + */ + tools?: Array< + AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool + > | null; +} + +export namespace ThreadCreateAndRunStreamParams { + /** + * If no thread is provided, an empty thread will be created. + */ + export interface Thread { /** - * The type of tool being defined: `code_interpreter` + * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + * start the thread with. */ - type: 'code_interpreter'; - } + messages?: Array; - export interface AssistantToolsRetrieval { /** - * The type of tool being defined: `retrieval` + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. */ - type: 'retrieval'; + metadata?: unknown | null; } - export interface AssistantToolsFunction { - function: Shared.FunctionDefinition; + export namespace Thread { + export interface Message { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the entity that is creating the message. Currently only `user` is + * supported. 
+ */ + role: 'user'; - /** - * The type of tool being defined: `function` - */ - type: 'function'; + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + * the message should use. There can be a maximum of 10 files attached to a + * message. Useful for tools like `retrieval` and `code_interpreter` that can + * access and use files. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + } } } @@ -295,21 +427,46 @@ export namespace Threads { export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams; export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; + export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; + export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; + export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; export import Runs = RunsAPI.Runs; export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; export import Run = RunsAPI.Run; export import RunStatus = RunsAPI.RunStatus; export import RunsPage = RunsAPI.RunsPage; export import RunCreateParams = RunsAPI.RunCreateParams; + export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; + export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; export import RunUpdateParams = RunsAPI.RunUpdateParams; export import RunListParams = RunsAPI.RunListParams; + export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; + 
export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; + export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; + export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; export import Messages = MessagesAPI.Messages; - export import MessageContentImageFile = MessagesAPI.MessageContentImageFile; - export import MessageContentText = MessagesAPI.MessageContentText; - export import ThreadMessage = MessagesAPI.ThreadMessage; - export import ThreadMessageDeleted = MessagesAPI.ThreadMessageDeleted; - export import ThreadMessagesPage = MessagesAPI.ThreadMessagesPage; + export import Annotation = MessagesAPI.Annotation; + export import AnnotationDelta = MessagesAPI.AnnotationDelta; + export import FileCitationAnnotation = MessagesAPI.FileCitationAnnotation; + export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation; + export import FilePathAnnotation = MessagesAPI.FilePathAnnotation; + export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation; + export import ImageFile = MessagesAPI.ImageFile; + export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; + export import ImageFileDelta = MessagesAPI.ImageFileDelta; + export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; + export import Message = MessagesAPI.Message; + export import MessageContent = MessagesAPI.MessageContent; + export import MessageContentDelta = MessagesAPI.MessageContentDelta; + export import MessageDeleted = MessagesAPI.MessageDeleted; + export import MessageDelta = MessagesAPI.MessageDelta; + export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; + export import Text = MessagesAPI.Text; + export import TextContentBlock = MessagesAPI.TextContentBlock; + export import TextDelta = MessagesAPI.TextDelta; + export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; + export import MessagesPage = 
MessagesAPI.MessagesPage; export import MessageCreateParams = MessagesAPI.MessageCreateParams; export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; export import MessageListParams = MessagesAPI.MessageListParams; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index c2d6da0be..41216a8e3 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -829,7 +829,7 @@ export interface ChatCompletionCreateParamsBase { /** * A list of tools the model may call. Currently, only functions are supported as a * tool. Use this to provide a list of functions the model may generate JSON inputs - * for. + * for. A max of 128 functions are supported. */ tools?: Array; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index f3e262f5f..83ecb3e99 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -253,6 +253,8 @@ export interface CompletionCreateParamsBase { /** * The suffix that comes after a completion of inserted text. + * + * This parameter is only supported for `gpt-3.5-turbo-instruct`. */ suffix?: string | null; diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 05ab66383..a6b2c11bd 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,15 @@ // File generated from our OpenAPI spec by Stainless. +export interface ErrorObject { + code: string | null; + + message: string; + + param: string | null; + + type: string; +} + export interface FunctionDefinition { /** * The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain diff --git a/src/streaming.ts b/src/streaming.ts index f90c5d89a..c452737aa 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -78,6 +78,20 @@ export class Stream implements AsyncIterable { } yield data; + } else { + let data; + try { + data = JSON.parse(sse.data); + } catch (e) { + console.error(`Could not parse message into JSON:`, sse.data); + console.error(`From chunk:`, sse.raw); + throw e; + } + // TODO: Is this where the error should be thrown? + if (sse.event == 'error') { + throw new APIError(undefined, data.error, data.message, undefined); + } + yield { event: sse.event, data: data } as any; } } done = true; diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 5a720afce..45f17040a 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -27,6 +27,7 @@ describe('resource runs', () => { instructions: 'string', metadata: {}, model: 'string', + stream: false, tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], }); }); @@ -127,6 +128,7 @@ describe('resource runs', () => { { tool_call_id: 'string', output: 'string' }, { tool_call_id: 'string', output: 'string' }, ], + stream: false, }); }); }); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index fc9fef723..9243dc11c 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -108,6 +108,7 @@ describe('resource threads', () => { instructions: 'string', metadata: {}, model: 'string', + stream: false, thread: { messages: [ { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, diff --git a/tests/streaming/assistants/assistant.test.ts b/tests/streaming/assistants/assistant.test.ts new file mode 100644 index 000000000..e8db3d585 --- /dev/null +++ 
b/tests/streaming/assistants/assistant.test.ts @@ -0,0 +1,32 @@ +import OpenAI from 'openai'; +import { AssistantStream } from 'openai/lib/AssistantStream'; + +const openai = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('assistant tests', () => { + test('delta accumulation', () => { + expect(AssistantStream.accumulateDelta({}, {})).toEqual({}); + expect(AssistantStream.accumulateDelta({}, { a: 'apple' })).toEqual({ a: 'apple' }); + + // strings + expect(AssistantStream.accumulateDelta({ a: 'foo' }, { a: ' bar' })).toEqual({ a: 'foo bar' }); + + // dictionaries + expect(AssistantStream.accumulateDelta({ a: { foo: '1' } }, { a: { bar: '2' } })).toEqual({ + a: { + foo: '1', + bar: '2', + }, + }); + expect(AssistantStream.accumulateDelta({ a: { foo: 'hello,' } }, { a: { foo: ' world' } })).toEqual({ + a: { foo: 'hello, world' }, + }); + + expect(AssistantStream.accumulateDelta({}, { a: null })).toEqual({ a: null }); + expect(AssistantStream.accumulateDelta({ a: null }, { a: 'apple' })).toEqual({ a: 'apple' }); + expect(AssistantStream.accumulateDelta({ a: null }, { a: null })).toEqual({ a: null }); + }); +}); From 993669b502416096e4fa3b9f300bf0746ecbec63 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:31:06 -0400 Subject: [PATCH 024/533] release: 4.29.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2813cb972..2f6cf24a7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.28.5" + ".": "4.29.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8798e4b66..19dfcc620 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # 
Changelog +## 4.29.0 (2024-03-13) + +Full Changelog: [v4.28.5...v4.29.0](https://github.com/openai/openai-node/compare/v4.28.5...v4.29.0) + +### Features + +* **assistants:** add support for streaming ([#714](https://github.com/openai/openai-node/issues/714)) ([7d27d28](https://github.com/openai/openai-node/commit/7d27d286876d0a575d91a4752f401126fe93d2a3)) + ## 4.28.5 (2024-03-13) Full Changelog: [v4.28.4...v4.28.5](https://github.com/openai/openai-node/compare/v4.28.4...v4.28.5) diff --git a/README.md b/README.md index 24d38ac79..93ae9f044 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.28.5/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.29.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index fb739cc50..c49755fda 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.28.5/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.29.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index d51c4ca96..ce6396c2e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.28.5", + "version": "4.29.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 516e764d1..0de2f3538 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.28.5'; // x-release-please-version +export const VERSION = '4.29.0'; // x-release-please-version From bc9a1ca308020a88c29d409edc06cdfca8cbf8f5 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 15 Mar 2024 13:31:36 -0400 Subject: [PATCH 025/533] docs(readme): assistant 
streaming (#719) --- helpers.md | 158 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 145 insertions(+), 13 deletions(-) diff --git a/helpers.md b/helpers.md index 76423ee07..9f01a126a 100644 --- a/helpers.md +++ b/helpers.md @@ -1,6 +1,140 @@ -# Chat Completion Helpers +# Streaming Helpers -## Streaming Responses +OpenAI supports streaming responses when interacting with the [Chat](#chat-streaming) or [Assistant](#assistant-streaming-api) APIs. + +## Assistant Streaming API + +OpenAI supports streaming responses from Assistants. The SDK provides convenience wrappers around the API +so you can subscribe to the types of events you are interested in as well as receive accumulated responses. + +More information can be found in the documentation: [Assistant Streaming](https://platform.openai.com/docs/assistants/overview?lang=node.js) + +#### An example of creating a run and subscribing to some events + +```ts +const run = openai.beta.threads.runs + .createAndStream(thread.id, { + assistant_id: assistant.id, + }) + .on('textCreated', (text) => process.stdout.write('\nassistant > ')) + .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value)) + .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`)) + .on('toolCallDelta', (toolCallDelta, snapshot) => { + if (toolCallDelta.type === 'code_interpreter') { + if (toolCallDelta.code_interpreter.input) { + process.stdout.write(toolCallDelta.code_interpreter.input); + } + if (toolCallDelta.code_interpreter.outputs) { + process.stdout.write('\noutput >\n'); + toolCallDelta.code_interpreter.outputs.forEach((output) => { + if (output.type === 'logs') { + process.stdout.write(`\n${output.logs}\n`); + } + }); + } + } + }); +``` + +### Assistant Events + +The assistant API provides events you can subscribe to for the following events. + +```ts +.on('event', (event: AssistantStreamEvent) => ...) 
+``` + +This allows you to subscribe to all the possible raw events sent by the OpenAI streaming API. +In many cases it will be more convenient to subscribe to a more specific set of events for your use case. + +More information on the types of events can be found here: [Events](https://platform.openai.com/docs/api-reference/assistants-streaming/events) + +```ts +.on('runStepCreated', (runStep: RunStep) => ...) +.on('runStepDelta', (delta: RunStepDelta, snapshot: RunStep) => ...) +.on('runStepDone', (runStep: RunStep) => ...) +``` + +These events allow you to subscribe to the creation, delta and completion of a RunStep. + +For more information on how Runs and RunSteps work see the documentation [Runs and RunSteps](https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps) + +```ts +.on('messageCreated', (message: Message) => ...) +.on('messageDelta', (delta: MessageDelta, snapshot: Message) => ...) +.on('messageDone', (message: Message) => ...) +``` + +This allows you to subscribe to Message creation, delta and completion events. Messages can contain +different types of content that can be sent from a model (and events are available for specific content types). +For convenience, the delta event includes both the incremental update and an accumulated snapshot of the content. + +More information on messages can be found +on in the documentation page [Message](https://platform.openai.com/docs/api-reference/messages/object). + +```ts +.on('textCreated', (content: Text) => ...) +.on('textDelta', (delta: RunStepDelta, snapshot: Text) => ...) +.on('textDone', (content: Text, snapshot: Message) => ...) +``` + +These events allow you to subscribe to the creation, delta and completion of a Text content (a specific type of message). +For convenience, the delta event includes both the incremental update and an accumulated snapshot of the content. + +```ts +.on('imageFileDone', (content: ImageFile, snapshot: Message) => ...) 
+``` + +Image files are not sent incrementally so an event is provided for when a image file is available. + +```ts +.on('toolCallCreated', (toolCall: ToolCall) => ...) +.on('toolCallDelta', (delta: RunStepDelta, snapshot: ToolCall) => ...) +.on('toolCallDone', (toolCall: ToolCall) => ...) +``` + +These events allow you to subscribe to events for the creation, delta and completion of a ToolCall. + +More information on tools can be found here [Tools](https://platform.openai.com/docs/assistants/tools) + +```ts +.on('end', () => ...) +``` + +The last event send when a stream ends. + +### Assistant Methods + +The assistant streaming object also provides a few methods for convenience: + +```ts +.currentEvent() + +.currentRun() + +.currentMessageSnapshot() + +.currentRunStepSnapshot() +``` + +These methods are provided to allow you to access additional context from within event handlers. In many cases +the handlers should include all the information you need for processing, but if additional context is required it +can be accessed. + +Note: There is not always a relevant context in certain situations (these will be undefined in those cases). + +```ts +await.finalMessages(); + +await.finalRunSteps(); +``` + +These methods are provided for convenience to collect information at the end of a stream. Calling these events +will trigger consumption of the stream until completion and then return the relevant accumulated objects. + +## Chat Streaming + +### Streaming Responses ```ts openai.chat.completions.stream({ stream?: false, … }, options?): ChatCompletionStreamingRunner @@ -18,7 +152,7 @@ If you need to cancel a stream, you can `break` from a `for await` loop or call See an example of streaming helpers in action in [`examples/stream.ts`](examples/stream.ts). 
-## Automated Function Calls +### Automated Function Calls ```ts openai.chat.completions.runTools({ stream: false, … }, options?): ChatCompletionRunner @@ -69,9 +203,7 @@ See an example of automated function calls in action in Note, `runFunctions` was also previously available, but has been deprecated in favor of `runTools`. -## Runner API - -### Events +### Chat Events #### `.on('connect', () => …)` @@ -148,7 +280,7 @@ The event fired at the end, returning the total usage of the call. The last event fired in the stream. -### Methods +### Chat Methods #### `.abort()` @@ -190,7 +322,7 @@ A promise which resolves with the last message with a `role: "function"`. Throws A promise which resolves with the total usage. -### Fields +### Chat Fields #### `.messages` @@ -200,9 +332,9 @@ A mutable array of all messages in the conversation. The underlying `AbortController` for the runner. -## Examples +### Chat Examples -### Abort on a function call +#### Abort on a function call If you have a function call flow which you intend to _end_ with a certain function call, then you can use the second argument `runner` given to the function to either mutate `runner.messages` or call `runner.abort()`. @@ -238,7 +370,7 @@ async function main() { main(); ``` -### Integrate with `zod` +#### Integrate with `zod` [`zod`](https://www.npmjs.com/package/zod) is a schema validation library which can help with validating the assistant's response to make sure it conforms to a schema. Paired with [`zod-to-json-schema`](https://www.npmjs.com/package/zod-to-json-schema), the validation schema also acts as the `parameters` JSON Schema passed to the API. @@ -287,10 +419,10 @@ main(); See a more fully-fledged example in [`examples/function-call-helpers-zod.ts`](examples/function-call-helpers-zod.ts). -### Integrate with Next.JS +#### Integrate with Next.JS See an example of a Next.JS integration here [`examples/stream-to-client-next.ts`](examples/stream-to-client-next.ts). 
-### Proxy Streaming to a Browser +#### Proxy Streaming to a Browser See an example of using express to stream to a browser here [`examples/stream-to-client-express.ts`](examples/stream-to-client-express.ts). From 4760ccc4be8b0951414eb443d186d3c506731195 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 15 Mar 2024 13:31:56 -0400 Subject: [PATCH 026/533] release: 4.29.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2f6cf24a7..ded4849c4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.29.0" + ".": "4.29.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 19dfcc620..741a701b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.29.1 (2024-03-15) + +Full Changelog: [v4.29.0...v4.29.1](https://github.com/openai/openai-node/compare/v4.29.0...v4.29.1) + +### Documentation + +* **readme:** assistant streaming ([#719](https://github.com/openai/openai-node/issues/719)) ([bc9a1ca](https://github.com/openai/openai-node/commit/bc9a1ca308020a88c29d409edc06cdfca8cbf8f5)) + ## 4.29.0 (2024-03-13) Full Changelog: [v4.28.5...v4.29.0](https://github.com/openai/openai-node/compare/v4.28.5...v4.29.0) diff --git a/README.md b/README.md index 93ae9f044..68d337e81 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.29.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.29.1/mod.ts'; ``` diff --git a/build-deno b/build-deno index c49755fda..c08e26dbc 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import 
OpenAI from "https://deno.land/x/openai@v4.29.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.29.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index ce6396c2e..8a6398765 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.29.0", + "version": "4.29.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 0de2f3538..8acef11c3 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.29.0'; // x-release-please-version +export const VERSION = '4.29.1'; // x-release-please-version From 05ff8f7671fe6ce5d9517034f76a166a0bd27803 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 18 Mar 2024 22:27:15 -0400 Subject: [PATCH 027/533] docs: fix typo in CONTRIBUTING.md (#722) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d9e64025d..9e8f669a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -68,7 +68,7 @@ pnpm link -—global openai Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. 
```bash -npx prism path/to/your/openapi.yml +npx prism mock path/to/your/openapi.yml ``` ```bash From 139e205ed1ed30cb1df982d852a093dcea945aba Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 19 Mar 2024 06:42:30 -0400 Subject: [PATCH 028/533] chore(internal): update generated pragma comment (#724) --- src/error.ts | 2 +- src/index.ts | 2 +- src/pagination.ts | 2 +- src/resource.ts | 2 +- src/resources/audio/audio.ts | 2 +- src/resources/audio/index.ts | 2 +- src/resources/audio/speech.ts | 2 +- src/resources/audio/transcriptions.ts | 2 +- src/resources/audio/translations.ts | 2 +- src/resources/beta/assistants/assistants.ts | 2 +- src/resources/beta/assistants/files.ts | 2 +- src/resources/beta/assistants/index.ts | 2 +- src/resources/beta/beta.ts | 2 +- src/resources/beta/chat/chat.ts | 2 +- src/resources/beta/chat/completions.ts | 2 +- src/resources/beta/chat/index.ts | 2 +- src/resources/beta/index.ts | 2 +- src/resources/beta/threads/index.ts | 2 +- src/resources/beta/threads/messages/files.ts | 2 +- src/resources/beta/threads/messages/index.ts | 2 +- src/resources/beta/threads/messages/messages.ts | 2 +- src/resources/beta/threads/runs/index.ts | 2 +- src/resources/beta/threads/runs/runs.ts | 2 +- src/resources/beta/threads/runs/steps.ts | 2 +- src/resources/beta/threads/threads.ts | 2 +- src/resources/chat/chat.ts | 2 +- src/resources/chat/completions.ts | 2 +- src/resources/chat/index.ts | 2 +- src/resources/completions.ts | 2 +- src/resources/embeddings.ts | 2 +- src/resources/files.ts | 2 +- src/resources/fine-tuning/fine-tuning.ts | 2 +- src/resources/fine-tuning/index.ts | 2 +- src/resources/fine-tuning/jobs.ts | 2 +- src/resources/images.ts | 2 +- src/resources/index.ts | 2 +- src/resources/models.ts | 2 +- src/resources/moderations.ts | 2 +- src/resources/shared.ts | 2 +- tests/api-resources/audio/speech.test.ts | 2 +- tests/api-resources/audio/transcriptions.test.ts | 2 +- 
tests/api-resources/audio/translations.test.ts | 2 +- tests/api-resources/beta/assistants/assistants.test.ts | 2 +- tests/api-resources/beta/assistants/files.test.ts | 2 +- tests/api-resources/beta/threads/messages/files.test.ts | 2 +- tests/api-resources/beta/threads/messages/messages.test.ts | 2 +- tests/api-resources/beta/threads/runs/runs.test.ts | 2 +- tests/api-resources/beta/threads/runs/steps.test.ts | 2 +- tests/api-resources/beta/threads/threads.test.ts | 2 +- tests/api-resources/chat/completions.test.ts | 2 +- tests/api-resources/completions.test.ts | 2 +- tests/api-resources/embeddings.test.ts | 2 +- tests/api-resources/files.test.ts | 2 +- tests/api-resources/fine-tuning/jobs.test.ts | 2 +- tests/api-resources/images.test.ts | 2 +- tests/api-resources/models.test.ts | 2 +- tests/api-resources/moderations.test.ts | 2 +- tests/index.test.ts | 2 +- 58 files changed, 58 insertions(+), 58 deletions(-) diff --git a/src/error.ts b/src/error.ts index fd7477ad2..deac34c5d 100644 --- a/src/error.ts +++ b/src/error.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { castToError, Headers } from './core'; diff --git a/src/index.ts b/src/index.ts index 7b3033fa9..9a2b2eaad 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from './core'; import * as Errors from './error'; diff --git a/src/pagination.ts b/src/pagination.ts index 5d890a140..63644e333 100644 --- a/src/pagination.ts +++ b/src/pagination.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { AbstractPage, Response, APIClient, FinalRequestOptions, PageInfo } from './core'; diff --git a/src/resource.ts b/src/resource.ts index 0bf87cf33..87847c879 100644 --- a/src/resource.ts +++ b/src/resource.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import type { OpenAI } from './index'; diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index 960577b0d..f3fcba4c3 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from 'openai/resource'; import * as SpeechAPI from 'openai/resources/audio/speech'; diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index 17c81d3bb..31732a267 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { Audio } from './audio'; export { SpeechCreateParams, Speech } from './speech'; diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 7d0ee2195..4b83bae3e 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index ab2079ed6..f01e8556d 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. 
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index e68a714fb..234933236 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants/assistants.ts index b4e92fd92..1e8ca6ee9 100644 --- a/src/resources/beta/assistants/assistants.ts +++ b/src/resources/beta/assistants/assistants.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/beta/assistants/files.ts b/src/resources/beta/assistants/files.ts index 7de700e50..51fd0c0d8 100644 --- a/src/resources/beta/assistants/files.ts +++ b/src/resources/beta/assistants/files.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/beta/assistants/index.ts b/src/resources/beta/assistants/index.ts index 0ae8c9c67..c191d338b 100644 --- a/src/resources/beta/assistants/index.ts +++ b/src/resources/beta/assistants/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
export { Assistant, diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 74056ed1d..43ee8c7e7 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from 'openai/resource'; import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; diff --git a/src/resources/beta/chat/chat.ts b/src/resources/beta/chat/chat.ts index a9cadc681..2b4a7a404 100644 --- a/src/resources/beta/chat/chat.ts +++ b/src/resources/beta/chat/chat.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from 'openai/resource'; import * as CompletionsAPI from 'openai/resources/beta/chat/completions'; diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index e7f89f5cf..95fd0ac79 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/beta/chat/index.ts b/src/resources/beta/chat/index.ts index 8d0ee40ae..23b1b8ff3 100644 --- a/src/resources/beta/chat/index.ts +++ b/src/resources/beta/chat/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
export { Chat } from './chat'; export { Completions } from './completions'; diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index d8770c29a..7f35730fb 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { Assistant, diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 3585be846..097a52819 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { Annotation, diff --git a/src/resources/beta/threads/messages/files.ts b/src/resources/beta/threads/messages/files.ts index 72c01bb97..994b09d5f 100644 --- a/src/resources/beta/threads/messages/files.ts +++ b/src/resources/beta/threads/messages/files.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/beta/threads/messages/index.ts b/src/resources/beta/threads/messages/index.ts index f68edbbd4..ef446d012 100644 --- a/src/resources/beta/threads/messages/index.ts +++ b/src/resources/beta/threads/messages/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
export { Annotation, diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts index b38a4bbf0..a2f2aaf1c 100644 --- a/src/resources/beta/threads/messages/messages.ts +++ b/src/resources/beta/threads/messages/messages.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index 7fa34637a..636b5d850 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { CodeInterpreterLogs, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 8fe09ecc6..a28dd9ae9 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIPromise } from 'openai/core'; diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index 4218e9769..f0816fdb2 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index cbde41f89..266f6709e 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIPromise } from 'openai/core'; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 07c7700dc..6c7bccb22 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from 'openai/resource'; import * as CompletionsAPI from 'openai/resources/chat/completions'; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 41216a8e3..8119639f2 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIPromise } from 'openai/core'; diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index b8b69e453..78a7516ed 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
export { Chat } from './chat'; export { diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 83ecb3e99..b64c3a166 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIPromise } from 'openai/core'; diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 3f59d2a7c..208ceb240 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/files.ts b/src/resources/files.ts index cda487a63..820c7a1fa 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index 5d2d27ac3..e62f8f09c 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from 'openai/resource'; import * as JobsAPI from 'openai/resources/fine-tuning/jobs'; diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index c2cac49ac..2885f62f4 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { FineTuning } from './fine-tuning'; export { diff --git a/src/resources/fine-tuning/jobs.ts b/src/resources/fine-tuning/jobs.ts index 7bc216d7c..eb77405ca 100644 --- a/src/resources/fine-tuning/jobs.ts +++ b/src/resources/fine-tuning/jobs.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/images.ts b/src/resources/images.ts index bc5b9edc0..95f0b6ff2 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/index.ts b/src/resources/index.ts index 16ce85123..a9741f5fd 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export * from './chat/index'; export * from './shared'; diff --git a/src/resources/models.ts b/src/resources/models.ts index 6c6c3379c..4d5bc57e9 100644 --- a/src/resources/models.ts +++ b/src/resources/models.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. 
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index a43006ccf..b9b9d7fc6 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; diff --git a/src/resources/shared.ts b/src/resources/shared.ts index a6b2c11bd..93fa05fa4 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export interface ErrorObject { code: string | null; diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index b0cf1a71c..18302ce9a 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index 33652af53..3fc4ca22b 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/audio/translations.test.ts b/tests/api-resources/audio/translations.test.ts index 723625f6e..0853bedfb 100644 --- a/tests/api-resources/audio/translations.test.ts +++ b/tests/api-resources/audio/translations.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/beta/assistants/assistants.test.ts b/tests/api-resources/beta/assistants/assistants.test.ts index 60ca0a6e2..b11075d06 100644 --- a/tests/api-resources/beta/assistants/assistants.test.ts +++ b/tests/api-resources/beta/assistants/assistants.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/beta/assistants/files.test.ts b/tests/api-resources/beta/assistants/files.test.ts index 8db328442..e285b4664 100644 --- a/tests/api-resources/beta/assistants/files.test.ts +++ b/tests/api-resources/beta/assistants/files.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/beta/threads/messages/files.test.ts b/tests/api-resources/beta/threads/messages/files.test.ts index b4a00a868..58c8813fe 100644 --- a/tests/api-resources/beta/threads/messages/files.test.ts +++ b/tests/api-resources/beta/threads/messages/files.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/beta/threads/messages/messages.test.ts b/tests/api-resources/beta/threads/messages/messages.test.ts index 35538efb9..3a80bfe1e 100644 --- a/tests/api-resources/beta/threads/messages/messages.test.ts +++ b/tests/api-resources/beta/threads/messages/messages.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 45f17040a..5e1b363fd 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/beta/threads/runs/steps.test.ts b/tests/api-resources/beta/threads/runs/steps.test.ts index 76eec269a..76495a1a3 100644 --- a/tests/api-resources/beta/threads/runs/steps.test.ts +++ b/tests/api-resources/beta/threads/runs/steps.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 9243dc11c..24cb815a7 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 49f3562b0..e0ccb3910 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 85fc68498..2641bf7e3 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index bcb5ffbba..d4e1f3240 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index 9e6373aba..514f42e3a 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/fine-tuning/jobs.test.ts b/tests/api-resources/fine-tuning/jobs.test.ts index 22f457303..d8f230abd 100644 --- a/tests/api-resources/fine-tuning/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 418a55eb0..33d633a63 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/models.test.ts b/tests/api-resources/models.test.ts index 91eb0d055..ca1f98365 100644 --- a/tests/api-resources/models.test.ts +++ b/tests/api-resources/models.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts index ad315df5d..ef7298fa9 100644 --- a/tests/api-resources/moderations.test.ts +++ b/tests/api-resources/moderations.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import OpenAI from 'openai'; import { Response } from 'node-fetch'; diff --git a/tests/index.test.ts b/tests/index.test.ts index 3fb42a80a..cd5f2a0a9 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -1,4 +1,4 @@ -// File generated from our OpenAPI spec by Stainless. +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import OpenAI from 'openai'; import { APIUserAbortError } from 'openai'; From 6a2c41b0ce833eba0cdea6a7d221697f3be26abb Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:13:42 -0400 Subject: [PATCH 029/533] docs: assistant improvements (#725) --- README.md | 31 +++++++++++++++++++++++++++++++ helpers.md | 37 ++++++++++++++++++++++++++++++------- 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 68d337e81..1eca06c85 100644 --- a/README.md +++ b/README.md @@ -100,6 +100,37 @@ Documentation for each method, request param, and response field are available i > [!IMPORTANT] > Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217). +### Streaming Helpers + +The SDK also includes helpers to process streams and handle the incoming events. 
+ +```ts +const run = openai.beta.threads.runs + .createAndStream(thread.id, { + assistant_id: assistant.id, + }) + .on('textCreated', (text) => process.stdout.write('\nassistant > ')) + .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value)) + .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`)) + .on('toolCallDelta', (toolCallDelta, snapshot) => { + if (toolCallDelta.type === 'code_interpreter') { + if (toolCallDelta.code_interpreter.input) { + process.stdout.write(toolCallDelta.code_interpreter.input); + } + if (toolCallDelta.code_interpreter.outputs) { + process.stdout.write('\noutput >\n'); + toolCallDelta.code_interpreter.outputs.forEach((output) => { + if (output.type === 'logs') { + process.stdout.write(`\n${output.logs}\n`); + } + }); + } + } + }); +``` + +More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md) + ### Streaming responses This library provides several conveniences for streaming chat completions, for example: diff --git a/helpers.md b/helpers.md index 9f01a126a..9a94a618e 100644 --- a/helpers.md +++ b/helpers.md @@ -36,6 +36,29 @@ const run = openai.beta.threads.runs }); ``` +### Starting a stream + +There are three helper methods for creating streams: + +```ts +openai.beta.threads.runs.createAndStream(); +``` + +This method can be used to start and stream the response to an existing run with an associated thread +that is already populated with messages. + +```ts +openai.beta.threads.createAndRunStream(); +``` + +This method can be used to add a message to a thread, start a run and then stream the response. + +```ts +openai.beta.threads.runs.submitToolOutputsStream(); +``` + +This method can be used to submit a tool output to a run waiting on the output and start a stream. + ### Assistant Events The assistant API provides events you can subscribe to for the following events. 
@@ -108,25 +131,25 @@ The last event send when a stream ends. The assistant streaming object also provides a few methods for convenience: ```ts -.currentEvent() +.currentEvent(): AssistantStreamEvent | undefined -.currentRun() +.currentRun(): Run | undefined -.currentMessageSnapshot() +.currentMessageSnapshot(): Message -.currentRunStepSnapshot() +.currentRunStepSnapshot(): Runs.RunStep ``` These methods are provided to allow you to access additional context from within event handlers. In many cases the handlers should include all the information you need for processing, but if additional context is required it can be accessed. -Note: There is not always a relevant context in certain situations (these will be undefined in those cases). +Note: There is not always a relevant context in certain situations (these will be `undefined` in those cases). ```ts -await.finalMessages(); +await .finalMessages() : Promise -await.finalRunSteps(); +await .finalRunSteps(): Promise ``` These methods are provided for convenience to collect information at the end of a stream. 
Calling these events From dda3f6890cf6a6b8f885a6470240b3036eab3b09 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:14:03 -0400 Subject: [PATCH 030/533] release: 4.29.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ded4849c4..fc4efb3a0 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.29.1" + ".": "4.29.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 741a701b3..497a341af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.29.2 (2024-03-19) + +Full Changelog: [v4.29.1...v4.29.2](https://github.com/openai/openai-node/compare/v4.29.1...v4.29.2) + +### Chores + +* **internal:** update generated pragma comment ([#724](https://github.com/openai/openai-node/issues/724)) ([139e205](https://github.com/openai/openai-node/commit/139e205ed1ed30cb1df982d852a093dcea945aba)) + + +### Documentation + +* assistant improvements ([#725](https://github.com/openai/openai-node/issues/725)) ([6a2c41b](https://github.com/openai/openai-node/commit/6a2c41b0ce833eba0cdea6a7d221697f3be26abb)) +* fix typo in CONTRIBUTING.md ([#722](https://github.com/openai/openai-node/issues/722)) ([05ff8f7](https://github.com/openai/openai-node/commit/05ff8f7671fe6ce5d9517034f76a166a0bd27803)) + ## 4.29.1 (2024-03-15) Full Changelog: [v4.29.0...v4.29.1](https://github.com/openai/openai-node/compare/v4.29.0...v4.29.1) diff --git a/README.md b/README.md index 1eca06c85..9699fca42 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.29.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.29.2/mod.ts'; ``` 
diff --git a/build-deno b/build-deno index c08e26dbc..25569475f 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.29.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.29.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 8a6398765..25994c236 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.29.1", + "version": "4.29.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 8acef11c3..a9177ff54 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.29.1'; // x-release-please-version +export const VERSION = '4.29.2'; // x-release-please-version From 7d87199f5245e9c5a4ebee34e15838ae5ce47100 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 19 Mar 2024 19:00:03 -0400 Subject: [PATCH 031/533] fix(internal): make toFile use input file's options (#727) --- src/uploads.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/uploads.ts b/src/uploads.ts index 2398baf35..081827c9a 100644 --- a/src/uploads.ts +++ b/src/uploads.ts @@ -102,11 +102,14 @@ export type ToFileInput = Uploadable | Exclude | AsyncIter export async function toFile( value: ToFileInput | PromiseLike, name?: string | null | undefined, - options: FilePropertyBag | undefined = {}, + options?: FilePropertyBag | undefined, ): Promise { // If it's a promise, resolve it. value = await value; + // Use the file's options if there isn't one provided + options ??= isFileLike(value) ? 
{ lastModified: value.lastModified, type: value.type } : {}; + if (isResponseLike(value)) { const blob = await value.blob(); name ||= new URL(value.url).pathname.split(/[\\/]/).pop() ?? 'unknown_file'; From 3c59fa750cf25fc65395482794b8c3b90f826674 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 20 Mar 2024 16:01:47 -0400 Subject: [PATCH 032/533] docs(readme): consistent use of sentence case in headings (#729) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 9699fca42..a7bf62f94 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ async function main() { main(); ``` -## Streaming Responses +## Streaming responses We provide support for streaming responses using Server Sent Events (SSE). @@ -256,7 +256,7 @@ Note that `runFunctions` was previously available as well, but has been deprecat Read more about various examples such as with integrating with [zod](helpers.md#integrate-with-zod), [next.js](helpers.md#integrate-wtih-next-js), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). 
-## File Uploads +## File uploads Request parameters that correspond to file uploads can be passed in many different forms: @@ -497,7 +497,7 @@ await openai.models.list({ }); ``` -## Semantic Versioning +## Semantic versioning This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: From a7cc3e15bf2ed64bf02a559d2956a3f89f43e5ff Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 20 Mar 2024 23:26:10 -0400 Subject: [PATCH 033/533] docs(readme): document how to make undocumented requests (#730) --- README.md | 50 ++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a7bf62f94..6676cba0d 100644 --- a/README.md +++ b/README.md @@ -437,7 +437,51 @@ console.log(raw.headers.get('X-My-Header')); console.log(chatCompletion); ``` -## Customizing the fetch client +### Making custom/undocumented requests + +This library is typed for convenient access to the documented API. If you need to access undocumented +endpoints, params, or response properties, the library can still be used. + +#### Undocumented endpoints + +To make requests to undocumented endpoints, you can use `client.get`, `client.post`, and other HTTP verbs. +Options on the client, such as retries, will be respected when making these requests. + +```ts +await client.post('/some/path', { + body: { some_prop: 'foo' }, + query: { some_query_arg: 'bar' }, +}); +``` + +#### Undocumented params + +To make requests using undocumented parameters, you may use `// @ts-expect-error` on the undocumented +parameter. This library doesn't validate at runtime that the request matches the type, so any extra values you +send will be sent as-is. 
+ +```ts +client.foo.create({ + foo: 'my_param', + bar: 12, + // @ts-expect-error baz is not yet public + baz: 'undocumented option', +}); +``` + +For requests with the `GET` verb, any extra params will be in the query, all other requests will send the +extra param in the body. + +If you want to explicitly send an extra argument, you can do so with the `query`, `body`, and `headers` request +options. + +#### Undocumented properties + +To access undocumented response properties, you may access the response object with `// @ts-expect-error` on +the response object, or cast the response object to the requisite type. Like the request params, we do not +validate or strip extra properties from the response from the API. + +### Customizing the fetch client By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments. @@ -455,6 +499,8 @@ import OpenAI from 'openai'; To do the inverse, add `import "openai/shims/node"` (which does import polyfills). This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/openai/openai-node/tree/master/src/_shims#readme)). +### Logging and middleware + You may also provide a custom `fetch` function when instantiating the client, which can be used to inspect or alter the `Request` or `Response` before/after each request: @@ -475,7 +521,7 @@ const client = new OpenAI({ Note that if given a `DEBUG=true` environment variable, this library will log all requests and responses automatically. This is intended for debugging purposes only and may change in the future without notice. -## Configuring an HTTP(S) Agent (e.g., for proxies) +### Configuring an HTTP(S) Agent (e.g., for proxies) By default, this library uses a stable agent for all http/https requests to reuse TCP connections, eliminating many TCP & TLS handshakes and shaving around 100ms off most requests. 
From 1b5f9027728341061ec40b32e1010928db5253fc Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 21 Mar 2024 11:55:15 -0400 Subject: [PATCH 034/533] fix: handle process.env being undefined in debug func (#733) --- src/core.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core.ts b/src/core.ts index a94251808..4364c7a3c 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1075,7 +1075,7 @@ function applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void { } export function debug(action: string, ...args: any[]) { - if (typeof process !== 'undefined' && process.env['DEBUG'] === 'true') { + if (typeof process !== 'undefined' && process?.env?.['DEBUG'] === 'true') { console.log(`OpenAI:DEBUG:${action}`, ...args); } } From f2925e54f32f972ab439d4a6d36a422ec56524c3 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 25 Mar 2024 07:30:00 -0400 Subject: [PATCH 035/533] fix(client): correctly send deno version header (#736) --- src/core.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core.ts b/src/core.ts index 4364c7a3c..39fe0f97f 100644 --- a/src/core.ts +++ b/src/core.ts @@ -818,7 +818,8 @@ const getPlatformProperties = (): PlatformProperties => { 'X-Stainless-OS': normalizePlatform(Deno.build.os), 'X-Stainless-Arch': normalizeArch(Deno.build.arch), 'X-Stainless-Runtime': 'deno', - 'X-Stainless-Runtime-Version': Deno.version, + 'X-Stainless-Runtime-Version': + typeof Deno.version === 'string' ? Deno.version : Deno.version?.deno ?? 
'unknown', }; } if (typeof EdgeRuntime !== 'undefined') { From 1b1d357314d9b1995c9787fec9fa8514fd384886 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 26 Mar 2024 19:08:47 -0400 Subject: [PATCH 036/533] chore(internal): add type (#737) --- src/streaming.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/streaming.ts b/src/streaming.ts index c452737aa..6b0f2a345 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -201,7 +201,7 @@ export class Stream implements AsyncIterable { async start() { iter = self[Symbol.asyncIterator](); }, - async pull(ctrl) { + async pull(ctrl: any) { try { const { value, done } = await iter.next(); if (done) return ctrl.close(); From 3dcaa345a7395e80cb91f32c5b2361a5dd8d1222 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 27 Mar 2024 19:51:22 +0000 Subject: [PATCH 037/533] feat: assistant fromReadableStream (#738) --- src/lib/AssistantStream.ts | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index d70cb7358..c0a176db5 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -158,6 +158,32 @@ export class AssistantStream }; } + static fromReadableStream(stream: ReadableStream): AssistantStream { + const runner = new AssistantStream(); + runner._run(() => runner._fromReadableStream(stream)); + return runner; + } + + protected async _fromReadableStream( + readableStream: ReadableStream, + options?: Core.RequestOptions, + ): Promise { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + this._connected(); + const stream = Stream.fromReadableStream(readableStream, this.controller); + for await (const event of stream) { + this.#handleEvent(event); + } + if 
(stream.controller.signal?.aborted) { + throw new APIUserAbortError(); + } + return this._addRun(this.#endRequest()); + } + toReadableStream(): ReadableStream { const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller); return stream.toReadableStream(); @@ -385,7 +411,7 @@ export class AssistantStream throw new OpenAIError(`stream has ended, this shouldn't happen`); } - if (!this.#finalRun) throw Error('Final run has been been received'); + if (!this.#finalRun) throw Error('Final run has not been received'); return this.#finalRun; } From 237388533476b8b34fbda7ce5fbb9b466dae9c3c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 27 Mar 2024 20:38:46 +0000 Subject: [PATCH 038/533] fix(example): correcting example (#739) --- examples/assistant-stream-raw.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/assistant-stream-raw.ts b/examples/assistant-stream-raw.ts index a882d219a..399064807 100644 --- a/examples/assistant-stream-raw.ts +++ b/examples/assistant-stream-raw.ts @@ -1,3 +1,5 @@ +#!/usr/bin/env -S npm run tsn -T + import OpenAI from 'openai'; const openai = new OpenAI(); @@ -27,7 +29,7 @@ async function main() { for await (const event of stream) { if (event.event === 'thread.message.delta') { const chunk = event.data.delta.content?.[0]; - if (chunk && 'text' in chunk) { + if (chunk && 'text' in chunk && chunk.text.value) { process.stdout.write(chunk.text.value); } } From 540d9ca9b9d84df6987fdedf640c2fa761417f2e Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 28 Mar 2024 05:06:39 +0000 Subject: [PATCH 039/533] release: 4.30.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 27 +++++++++++++++++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 32 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json 
b/.release-please-manifest.json index fc4efb3a0..1e5205f3f 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.29.2" + ".": "4.30.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 497a341af..bbc1785dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## 4.30.0 (2024-03-28) + +Full Changelog: [v4.29.2...v4.30.0](https://github.com/openai/openai-node/compare/v4.29.2...v4.30.0) + +### Features + +* assistant fromReadableStream ([#738](https://github.com/openai/openai-node/issues/738)) ([8f4ba18](https://github.com/openai/openai-node/commit/8f4ba18268797d6c54c393d701b13c7ff2aa71bc)) + + +### Bug Fixes + +* **client:** correctly send deno version header ([#736](https://github.com/openai/openai-node/issues/736)) ([b7ea175](https://github.com/openai/openai-node/commit/b7ea175b2854909de77b920dd25613f1d2daefd6)) +* **example:** correcting example ([#739](https://github.com/openai/openai-node/issues/739)) ([a819551](https://github.com/openai/openai-node/commit/a81955175da24e196490a38850bbf6f9b6779ea8)) +* handle process.env being undefined in debug func ([#733](https://github.com/openai/openai-node/issues/733)) ([2baa149](https://github.com/openai/openai-node/commit/2baa1491f7834f779ca49c3027d2344ead412dd2)) +* **internal:** make toFile use input file's options ([#727](https://github.com/openai/openai-node/issues/727)) ([15880d7](https://github.com/openai/openai-node/commit/15880d77b6c1cf58a6b9cfdbf7ae4442cdbddbd6)) + + +### Chores + +* **internal:** add type ([#737](https://github.com/openai/openai-node/issues/737)) ([18c1989](https://github.com/openai/openai-node/commit/18c19891f783019517d7961fe03c4d98de0fcf93)) + + +### Documentation + +* **readme:** consistent use of sentence case in headings ([#729](https://github.com/openai/openai-node/issues/729)) ([7e515fd](https://github.com/openai/openai-node/commit/7e515fde433ebfb7871d75d53915eef05a08a916)) +* **readme:** document how to make 
undocumented requests ([#730](https://github.com/openai/openai-node/issues/730)) ([a06d861](https://github.com/openai/openai-node/commit/a06d861a015eeee411fa2c6ed9bf3000313cfc03)) + ## 4.29.2 (2024-03-19) Full Changelog: [v4.29.1...v4.29.2](https://github.com/openai/openai-node/compare/v4.29.1...v4.29.2) diff --git a/README.md b/README.md index 6676cba0d..892c0ca1b 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.29.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.30.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index 25569475f..6290acb0c 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.29.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.30.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 25994c236..57fa7aec6 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.29.2", + "version": "4.30.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index a9177ff54..2eb76a884 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.29.2'; // x-release-please-version +export const VERSION = '4.30.0'; // x-release-please-version From 7741b186fe7b04bf69594b1fb106e1deba3e52e0 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 29 Mar 2024 20:28:58 +0000 Subject: [PATCH 040/533] fix(streaming): trigger all event handlers with fromReadableStream (#741) --- src/lib/AssistantStream.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/AssistantStream.ts 
b/src/lib/AssistantStream.ts index c0a176db5..ece0ec65c 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -176,7 +176,7 @@ export class AssistantStream this._connected(); const stream = Stream.fromReadableStream(readableStream, this.controller); for await (const event of stream) { - this.#handleEvent(event); + this.#addEvent(event); } if (stream.controller.signal?.aborted) { throw new APIUserAbortError(); From 149d60e80ac5ab5b12a10a38f9b0d159dffd56ae Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 29 Mar 2024 21:08:19 +0000 Subject: [PATCH 041/533] feat(api): adding temperature parameter (#742) --- .../beta/threads/messages/messages.ts | 16 ++++--- src/resources/beta/threads/runs/runs.ts | 19 ++++++++ src/resources/beta/threads/threads.ts | 44 +++++++++++++++---- .../beta/threads/runs/runs.test.ts | 1 + .../beta/threads/threads.test.ts | 1 + 5 files changed, 66 insertions(+), 15 deletions(-) diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts index a2f2aaf1c..1c37eb2ff 100644 --- a/src/resources/beta/threads/messages/messages.ts +++ b/src/resources/beta/threads/messages/messages.ts @@ -353,9 +353,9 @@ export interface Message { role: 'user' | 'assistant'; /** - * If applicable, the ID of the - * [run](https://platform.openai.com/docs/api-reference/runs) associated with the - * authoring of this message. + * The ID of the [run](https://platform.openai.com/docs/api-reference/runs) + * associated with the creation of this message. Value is `null` when messages are + * created manually using the create message or create thread endpoints. */ run_id: string | null; @@ -501,10 +501,14 @@ export interface MessageCreateParams { content: string; /** - * The role of the entity that is creating the message. Currently only `user` is - * supported. + * The role of the entity that is creating the message. 
Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. */ - role: 'user'; + role: 'user' | 'assistant'; /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index a28dd9ae9..54c671131 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -331,6 +331,11 @@ export interface Run { * in a terminal state (i.e. `in_progress`, `queued`, etc.). */ usage: Run.Usage | null; + + /** + * The sampling temperature used for this run. If not set, defaults to 1. + */ + temperature?: number | null; } export namespace Run { @@ -461,6 +466,13 @@ export interface RunCreateParamsBase { */ stream?: boolean | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. @@ -555,6 +567,13 @@ export interface RunCreateAndStreamParams { */ model?: string | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. 
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 266f6709e..9b4785850 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -164,10 +164,14 @@ export namespace ThreadCreateParams { content: string; /** - * The role of the entity that is creating the message. Currently only `user` is - * supported. + * The role of the entity that is creating the message. Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. */ - role: 'user'; + role: 'user' | 'assistant'; /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -238,6 +242,13 @@ export interface ThreadCreateAndRunParamsBase { */ stream?: boolean | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + /** * If no thread is provided, an empty thread will be created. */ @@ -280,10 +291,14 @@ export namespace ThreadCreateAndRunParams { content: string; /** - * The role of the entity that is creating the message. Currently only `user` is - * supported. + * The role of the entity that is creating the message. Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. 
*/ - role: 'user'; + role: 'user' | 'assistant'; /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -355,6 +370,13 @@ export interface ThreadCreateAndRunStreamParams { */ model?: string | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + /** * If no thread is provided, an empty thread will be created. */ @@ -397,10 +419,14 @@ export namespace ThreadCreateAndRunStreamParams { content: string; /** - * The role of the entity that is creating the message. Currently only `user` is - * supported. + * The role of the entity that is creating the message. Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. 
*/ - role: 'user'; + role: 'user' | 'assistant'; /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 5e1b363fd..5f17c1b58 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -28,6 +28,7 @@ describe('resource runs', () => { metadata: {}, model: 'string', stream: false, + temperature: 1, tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], }); }); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 24cb815a7..3606019bd 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -109,6 +109,7 @@ describe('resource threads', () => { metadata: {}, model: 'string', stream: false, + temperature: 1, thread: { messages: [ { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, From abb0be7bcc6777e2efa8682c8a044842addb755a Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Sat, 30 Mar 2024 05:06:22 +0000 Subject: [PATCH 042/533] release: 4.31.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1e5205f3f..485bcd4e9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.30.0" + ".": "4.31.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index bbc1785dc..6a28f8d3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.31.0 (2024-03-30) + +Full Changelog: 
[v4.30.0...v4.31.0](https://github.com/openai/openai-node/compare/v4.30.0...v4.31.0) + +### Features + +* **api:** adding temperature parameter ([#742](https://github.com/openai/openai-node/issues/742)) ([b173b05](https://github.com/openai/openai-node/commit/b173b05eb52266d8f2c835ec4ed71cba8cdc609b)) + + +### Bug Fixes + +* **streaming:** trigger all event handlers with fromReadableStream ([#741](https://github.com/openai/openai-node/issues/741)) ([7b1e593](https://github.com/openai/openai-node/commit/7b1e5937d97b309ed51928b4388dcde74abda8dc)) + ## 4.30.0 (2024-03-28) Full Changelog: [v4.29.2...v4.30.0](https://github.com/openai/openai-node/compare/v4.29.2...v4.30.0) diff --git a/README.md b/README.md index 892c0ca1b..787dd25ae 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.30.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.31.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index 6290acb0c..66639030f 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.30.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.31.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 57fa7aec6..250e0939a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.30.0", + "version": "4.31.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 2eb76a884..8eb5423f5 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.30.0'; // x-release-please-version +export const VERSION = '4.31.0'; // x-release-please-version From 60bc77f87e860e86b702c506cf9e1a725b81a697 
Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Sat, 30 Mar 2024 20:44:38 +0000 Subject: [PATCH 043/533] docs(readme): change undocumented params wording (#744) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 787dd25ae..6707707b2 100644 --- a/README.md +++ b/README.md @@ -454,7 +454,7 @@ await client.post('/some/path', { }); ``` -#### Undocumented params +#### Undocumented request params To make requests using undocumented parameters, you may use `// @ts-expect-error` on the undocumented parameter. This library doesn't validate at runtime that the request matches the type, so any extra values you @@ -475,7 +475,7 @@ extra param in the body. If you want to explicitly send an extra argument, you can do so with the `query`, `body`, and `headers` request options. -#### Undocumented properties +#### Undocumented response properties To access undocumented response properties, you may access the response object with `// @ts-expect-error` on the response object, or cast the response object to the requisite type. Like the request params, we do not From 767bec025dd349c3a982a0aa62b134692d9a3ad2 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 1 Apr 2024 22:52:20 +0200 Subject: [PATCH 044/533] feat(api): add support for filtering messages by run_id (#747) --- src/resources/beta/threads/messages/messages.ts | 5 +++++ tests/api-resources/beta/threads/messages/messages.test.ts | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts index 1c37eb2ff..28026f3ff 100644 --- a/src/resources/beta/threads/messages/messages.ts +++ b/src/resources/beta/threads/messages/messages.ts @@ -551,6 +551,11 @@ export interface MessageListParams extends CursorPageParams { * order and `desc` for descending order. 
*/ order?: 'asc' | 'desc'; + + /** + * Filter messages by the run ID that generated them. + */ + run_id?: string; } export namespace Messages { diff --git a/tests/api-resources/beta/threads/messages/messages.test.ts b/tests/api-resources/beta/threads/messages/messages.test.ts index 3a80bfe1e..7f62944e0 100644 --- a/tests/api-resources/beta/threads/messages/messages.test.ts +++ b/tests/api-resources/beta/threads/messages/messages.test.ts @@ -81,7 +81,7 @@ describe('resource messages', () => { await expect( openai.beta.threads.messages.list( 'string', - { after: 'string', before: 'string', limit: 0, order: 'asc' }, + { after: 'string', before: 'string', limit: 0, order: 'asc', run_id: 'string' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); From bc202fcdd9d3f54ff028b7f809b784f26fbf9b29 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 1 Apr 2024 23:13:12 +0200 Subject: [PATCH 045/533] chore(deps): remove unused dependency digest-fetch (#748) --- package.json | 1 - tsconfig.build.json | 1 - tsconfig.deno.json | 1 - tsconfig.json | 1 - typings/digest-fetch/index.d.ts | 33 ----------------------------- yarn.lock | 37 --------------------------------- 6 files changed, 74 deletions(-) delete mode 100644 typings/digest-fetch/index.d.ts diff --git a/package.json b/package.json index 250e0939a..6fb9f1789 100644 --- a/package.json +++ b/package.json @@ -29,7 +29,6 @@ "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", - "digest-fetch": "^1.3.0", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7", diff --git a/tsconfig.build.json b/tsconfig.build.json index 6adad0d06..45811cb8b 100644 --- a/tsconfig.build.json +++ b/tsconfig.build.json @@ -7,7 +7,6 @@ "paths": { "openai/*": ["dist/src/*"], "openai": ["dist/src/index.ts"], - "digest-fetch": ["./typings/digest-fetch"] }, "noEmit": false, "declaration": true, diff --git 
a/tsconfig.deno.json b/tsconfig.deno.json index 5d6467665..d0e9473d9 100644 --- a/tsconfig.deno.json +++ b/tsconfig.deno.json @@ -9,7 +9,6 @@ "openai/_shims/auto/*": ["deno/_shims/auto/*-deno"], "openai/*": ["deno/*"], "openai": ["deno/index.ts"], - "digest-fetch": ["./typings/digest-fetch"] }, "noEmit": true, "declaration": true, diff --git a/tsconfig.json b/tsconfig.json index 9908b2c80..5f99085fc 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -12,7 +12,6 @@ "openai/_shims/auto/*": ["src/_shims/auto/*-node"], "openai/*": ["src/*"], "openai": ["src/index.ts"], - "digest-fetch": ["./typings/digest-fetch"] }, "noEmit": true, diff --git a/typings/digest-fetch/index.d.ts b/typings/digest-fetch/index.d.ts deleted file mode 100644 index f6bcbfda9..000000000 --- a/typings/digest-fetch/index.d.ts +++ /dev/null @@ -1,33 +0,0 @@ -declare module 'digest-fetch'; - -import type { RequestInfo, RequestInit, Response } from 'node-fetch'; - -type Algorithm = 'MD5' | 'MD5-sess'; - -type Options = { - algorithm?: Algorithm; - statusCode?: number; - cnonceSize?: number; - basic?: boolean; - precomputeHash?: boolean; - logger?: typeof console; -}; - -class DigestClient { - user: string; - password: string; - - private nonceRaw: string; - private logger?: typeof console; - private precomputedHash?: boolean; - private statusCode?: number; - private basic: boolean; - private cnonceSize: number; - private hasAuth: boolean; - private digest: { nc: number; algorithm: Algorithm; realm: string }; - - constructor(user: string, password: string, options: Options = {}); - async fetch(url: RequestInfo, options: RequestInit = {}): Promise; -} - -export default DigestClient; diff --git a/yarn.lock b/yarn.lock index a79485a26..9cef21d9b 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1076,11 +1076,6 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity 
sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== -base-64@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/base-64/-/base-64-0.1.0.tgz#780a99c84e7d600260361511c4877613bf24f6bb" - integrity sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA== - big-integer@^1.6.44: version "1.6.52" resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85" @@ -1193,11 +1188,6 @@ char-regex@^1.0.2: resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf" integrity sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw== -charenc@0.0.2: - version "0.0.2" - resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667" - integrity sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA== - ci-info@^3.2.0: version "3.9.0" resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4" @@ -1305,11 +1295,6 @@ cross-spawn@^7.0.2, cross-spawn@^7.0.3: shebang-command "^2.0.0" which "^2.0.1" -crypt@0.0.2: - version "0.0.2" - resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b" - integrity sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow== - debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" @@ -1380,14 +1365,6 @@ diff@^4.0.1: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity 
sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== -digest-fetch@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/digest-fetch/-/digest-fetch-1.3.0.tgz#898e69264d00012a23cf26e8a3e40320143fc661" - integrity sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA== - dependencies: - base-64 "^0.1.0" - md5 "^2.3.0" - dir-glob@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" @@ -1934,11 +1911,6 @@ is-arrayish@^0.2.1: resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== -is-buffer@~1.1.6: - version "1.1.6" - resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" - integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== - is-core-module@^2.13.0: version "2.13.1" resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384" @@ -2553,15 +2525,6 @@ makeerror@1.0.12: dependencies: tmpl "1.0.5" -md5@^2.3.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/md5/-/md5-2.3.0.tgz#c3da9a6aae3a30b46b7b0c349b87b110dc3bda4f" - integrity sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g== - dependencies: - charenc "0.0.2" - crypt "0.0.2" - is-buffer "~1.1.6" - merge-stream@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" From 8031df3675c36cb654e37a63edbc7e5b02b05bac Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> 
Date: Tue, 2 Apr 2024 00:38:55 +0200 Subject: [PATCH 046/533] feat(api): run polling helpers (#749) refactor: rename createAndStream to stream --- README.md | 19 +- api.md | 5 + examples/assistant-stream-raw.ts | 0 examples/assistant-stream.ts | 2 +- examples/assistants.ts | 22 +-- helpers.md | 4 +- src/resources/beta/beta.ts | 1 + src/resources/beta/index.ts | 1 + src/resources/beta/threads/index.ts | 4 + src/resources/beta/threads/runs/index.ts | 3 + src/resources/beta/threads/runs/runs.ts | 224 ++++++++++++++++++++++- src/resources/beta/threads/threads.ts | 124 +++++++++++++ 12 files changed, 389 insertions(+), 20 deletions(-) mode change 100644 => 100755 examples/assistant-stream-raw.ts mode change 100644 => 100755 examples/assistant-stream.ts mode change 100644 => 100755 examples/assistants.ts diff --git a/README.md b/README.md index 6707707b2..1ff9c757d 100644 --- a/README.md +++ b/README.md @@ -100,13 +100,30 @@ Documentation for each method, request param, and response field are available i > [!IMPORTANT] > Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217). +### Polling Helpers + +When interacting with the API some actions such as starting a Run may take time to complete. The SDK includes +helper functions which will poll the status until it reaches a terminal state and then return the resulting object. +If an API method results in an action which could benefit from polling there will be a corresponding version of the +method ending in 'AndPoll'. 
+ +For instance to create a Run and poll until it reaches a terminal state you can run: + +```ts +const run = await openai.beta.threads.runs.createAndPoll(thread.id, { + assistant_id: assistantId, +}); +``` + +More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle) + ### Streaming Helpers The SDK also includes helpers to process streams and handle the incoming events. ```ts const run = openai.beta.threads.runs - .createAndStream(thread.id, { + .stream(thread.id, { assistant_id: assistant.id, }) .on('textCreated', (text) => process.stdout.write('\nassistant > ')) diff --git a/api.md b/api.md index 504a103c7..2f82dd17b 100644 --- a/api.md +++ b/api.md @@ -224,6 +224,7 @@ Methods: - client.beta.threads.update(threadId, { ...params }) -> Thread - client.beta.threads.del(threadId) -> ThreadDeleted - client.beta.threads.createAndRun({ ...params }) -> Run +- client.beta.threads.createAndRunPoll(body, options?) -> Promise<Threads.Run> - client.beta.threads.createAndRunStream(body, options?) -> AssistantStream ### Runs @@ -242,7 +243,11 @@ Methods: - client.beta.threads.runs.list(threadId, { ...params }) -> RunsPage - client.beta.threads.runs.cancel(threadId, runId) -> Run - client.beta.threads.runs.submitToolOutputs(threadId, runId, { ...params }) -> Run +- client.beta.threads.runs.createAndPoll(threadId, body, options?) -> Promise<Run> - client.beta.threads.runs.createAndStream(threadId, body, options?) -> AssistantStream +- client.beta.threads.runs.poll(threadId, runId, options?) -> Promise<Run> +- client.beta.threads.runs.stream(threadId, body, options?) -> AssistantStream +- client.beta.threads.runs.submitToolOutputsAndPoll(threadId, runId, body, options?) -> Promise<Run> - client.beta.threads.runs.submitToolOutputsStream(threadId, runId, body, options?) 
-> AssistantStream #### Steps diff --git a/examples/assistant-stream-raw.ts b/examples/assistant-stream-raw.ts old mode 100644 new mode 100755 diff --git a/examples/assistant-stream.ts b/examples/assistant-stream.ts old mode 100644 new mode 100755 index 36c4ed152..6c71bf23b --- a/examples/assistant-stream.ts +++ b/examples/assistant-stream.ts @@ -31,7 +31,7 @@ async function main() { console.log('Created thread with Id: ' + threadId); const run = openai.beta.threads.runs - .createAndStream(threadId, { + .stream(threadId, { assistant_id: assistantId, }) //Subscribe to streaming events and log them diff --git a/examples/assistants.ts b/examples/assistants.ts old mode 100644 new mode 100755 index bbc2f80ce..40238ac86 --- a/examples/assistants.ts +++ b/examples/assistants.ts @@ -1,7 +1,6 @@ #!/usr/bin/env -S npm run tsn -T import OpenAI from 'openai'; -import { sleep } from 'openai/core'; /** * Example of polling for a complete response from an assistant @@ -32,24 +31,17 @@ async function main() { let threadId = thread.id; console.log('Created thread with Id: ' + threadId); - const run = await openai.beta.threads.runs.create(thread.id, { + const run = await openai.beta.threads.runs.createAndPoll(thread.id, { assistant_id: assistantId, additional_instructions: 'Please address the user as Jane Doe. The user has a premium account.', }); - console.log('Created run with Id: ' + run.id); - - while (true) { - const result = await openai.beta.threads.runs.retrieve(thread.id, run.id); - if (result.status == 'completed') { - const messages = await openai.beta.threads.messages.list(thread.id); - for (const message of messages.getPaginatedItems()) { - console.log(message); - } - break; - } else { - console.log('Waiting for completion. 
Current status: ' + result.status); - await sleep(5000); + console.log('Run finished with status: ' + run.status); + + if (run.status == 'completed') { + const messages = await openai.beta.threads.messages.list(thread.id); + for (const message of messages.getPaginatedItems()) { + console.log(message); } } } diff --git a/helpers.md b/helpers.md index 9a94a618e..7a34c3023 100644 --- a/helpers.md +++ b/helpers.md @@ -13,7 +13,7 @@ More information can be found in the documentation: [Assistant Streaming](https: ```ts const run = openai.beta.threads.runs - .createAndStream(thread.id, { + .stream(thread.id, { assistant_id: assistant.id, }) .on('textCreated', (text) => process.stdout.write('\nassistant > ')) @@ -41,7 +41,7 @@ const run = openai.beta.threads.runs There are three helper methods for creating streams: ```ts -openai.beta.threads.runs.createAndStream(); +openai.beta.threads.runs.stream(); ``` This method can be used to start and stream the response to an existing run with an associated thread diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 43ee8c7e7..7d4457319 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -37,5 +37,6 @@ export namespace Beta { export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; + export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams; export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; } diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 7f35730fb..e43ff7315 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -28,6 +28,7 @@ export { ThreadCreateAndRunParams, ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming, + 
ThreadCreateAndRunPollParams, ThreadCreateAndRunStreamParams, Threads, } from './threads/index'; diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 097a52819..ac2f9a4fa 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -36,10 +36,13 @@ export { RunCreateParamsStreaming, RunUpdateParams, RunListParams, + RunCreateAndPollParams, RunCreateAndStreamParams, + RunStreamParams, RunSubmitToolOutputsParams, RunSubmitToolOutputsParamsNonStreaming, RunSubmitToolOutputsParamsStreaming, + RunSubmitToolOutputsAndPollParams, RunSubmitToolOutputsStreamParams, RunsPage, Runs, @@ -52,6 +55,7 @@ export { ThreadCreateAndRunParams, ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming, + ThreadCreateAndRunPollParams, ThreadCreateAndRunStreamParams, Threads, } from './threads'; diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index 636b5d850..c9b2d1ef5 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -31,10 +31,13 @@ export { RunCreateParamsStreaming, RunUpdateParams, RunListParams, + RunCreateAndPollParams, RunCreateAndStreamParams, + RunStreamParams, RunSubmitToolOutputsParams, RunSubmitToolOutputsParamsNonStreaming, RunSubmitToolOutputsParamsStreaming, + RunSubmitToolOutputsAndPollParams, RunSubmitToolOutputsStreamParams, RunsPage, Runs, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 54c671131..5dfc7d595 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -5,6 +5,7 @@ import { APIPromise } from 'openai/core'; import { APIResource } from 'openai/resource'; import { isRequestOptions } from 'openai/core'; import { AssistantStream, RunCreateParamsBaseStream } from 'openai/lib/AssistantStream'; +import { sleep } from 'openai/core'; import { RunSubmitToolOutputsParamsStream } from 
'openai/lib/AssistantStream'; import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; @@ -102,8 +103,24 @@ export class Runs extends APIResource { }); } + /** + * A helper to create a run an poll for a terminal state. More information on Run + * lifecycles can be found here: + * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps + */ + async createAndPoll( + threadId: string, + body: RunCreateParamsNonStreaming, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const run = await this.create(threadId, body, options); + return await this.poll(threadId, run.id, options); + } + /** * Create a Run stream + * + * @deprecated use `stream` instead */ createAndStream( threadId: string, @@ -113,6 +130,66 @@ export class Runs extends APIResource { return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options); } + /** + * A helper to poll a run status until it reaches a terminal state. 
More + * information on Run lifecycles can be found here: + * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps + */ + async poll( + threadId: string, + runId: string, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' }; + + if (options?.pollIntervalMs) { + headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString(); + } + + while (true) { + const { data: run, response } = await this.retrieve(threadId, runId, { + ...options, + headers: { ...options?.headers, ...headers }, + }).withResponse(); + + switch (run.status) { + //If we are in any sort of intermediate state we poll + case 'queued': + case 'in_progress': + case 'cancelling': + let sleepInterval = 5000; + + if (options?.pollIntervalMs) { + sleepInterval = options.pollIntervalMs; + } else { + const headerInterval = response.headers.get('openai-poll-after-ms'); + if (headerInterval) { + const headerIntervalMs = parseInt(headerInterval); + if (!isNaN(headerIntervalMs)) { + sleepInterval = headerIntervalMs; + } + } + } + await sleep(sleepInterval); + break; + //We return the run in any terminal state. + case 'requires_action': + case 'cancelled': + case 'completed': + case 'failed': + case 'expired': + return run; + } + } + } + + /** + * Create a Run stream + */ + stream(threadId: string, body: RunCreateParamsBaseStream, options?: Core.RequestOptions): AssistantStream { + return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options); + } + /** * When a run has the `status: "requires_action"` and `required_action.type` is * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the @@ -151,9 +228,25 @@ export class Runs extends APIResource { }) as APIPromise | APIPromise>; } + /** + * A helper to submit a tool output to a run and poll for a terminal run state. 
+ * More information on Run lifecycles can be found here: + * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps + */ + async submitToolOutputsAndPoll( + threadId: string, + runId: string, + body: RunSubmitToolOutputsParamsNonStreaming, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const run = await this.submitToolOutputs(threadId, runId, body, options); + return await this.poll(threadId, run.id, options); + } + /** * Submit the tool outputs from a previous run and stream the run to a terminal - * state. + * state. More information on Run lifecycles can be found here: + * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps */ submitToolOutputsStream( threadId: string, @@ -529,6 +622,58 @@ export interface RunListParams extends CursorPageParams { order?: 'asc' | 'desc'; } +export interface RunCreateAndPollParams { + /** + * The ID of the + * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + * execute this run. + */ + assistant_id: string; + + /** + * Appends additional instructions at the end of the instructions for the run. This + * is useful for modifying the behavior on a per-run basis without overriding other + * instructions. + */ + additional_instructions?: string | null; + + /** + * Overrides the + * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + * of the assistant. This is useful for modifying the behavior on a per-run basis. + */ + instructions?: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + * be used to execute this run. 
If a value is provided here, it will override the + * model associated with the assistant. If not, the model associated with the + * assistant will be used. + */ + model?: string | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + + /** + * Override the tools the assistant can use for this run. This is useful for + * modifying the behavior on a per-run basis. + */ + tools?: Array | null; +} + export interface RunCreateAndStreamParams { /** * The ID of the @@ -581,6 +726,58 @@ export interface RunCreateAndStreamParams { tools?: Array | null; } +export interface RunStreamParams { + /** + * The ID of the + * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + * execute this run. + */ + assistant_id: string; + + /** + * Appends additional instructions at the end of the instructions for the run. This + * is useful for modifying the behavior on a per-run basis without overriding other + * instructions. + */ + additional_instructions?: string | null; + + /** + * Overrides the + * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + * of the assistant. This is useful for modifying the behavior on a per-run basis. + */ + instructions?: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + * be used to execute this run. If a value is provided here, it will override the + * model associated with the assistant. 
If not, the model associated with the + * assistant will be used. + */ + model?: string | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + + /** + * Override the tools the assistant can use for this run. This is useful for + * modifying the behavior on a per-run basis. + */ + tools?: Array | null; +} + export type RunSubmitToolOutputsParams = | RunSubmitToolOutputsParamsNonStreaming | RunSubmitToolOutputsParamsStreaming; @@ -635,6 +832,28 @@ export interface RunSubmitToolOutputsParamsStreaming extends RunSubmitToolOutput stream: true; } +export interface RunSubmitToolOutputsAndPollParams { + /** + * A list of tools for which the outputs are being submitted. + */ + tool_outputs: Array; +} + +export namespace RunSubmitToolOutputsAndPollParams { + export interface ToolOutput { + /** + * The output of the tool call to be submitted to continue the run. + */ + output?: string; + + /** + * The ID of the tool call in the `required_action` object within the run object + * the output is being submitted for. + */ + tool_call_id?: string; + } +} + export interface RunSubmitToolOutputsStreamParams { /** * A list of tools for which the outputs are being submitted. 
@@ -667,10 +886,13 @@ export namespace Runs { export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; export import RunUpdateParams = RunsAPI.RunUpdateParams; export import RunListParams = RunsAPI.RunListParams; + export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams; export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; + export import RunStreamParams = RunsAPI.RunStreamParams; export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; + export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams; export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; export import Steps = StepsAPI.Steps; export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 9b4785850..1b4b3f7d5 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -92,6 +92,19 @@ export class Threads extends APIResource { }) as APIPromise | APIPromise>; } + /** + * A helper to create a thread, start a run and then poll for a terminal state. 
+ * More information on Run lifecycles can be found here: + * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps + */ + async createAndRunPoll( + body: ThreadCreateAndRunParamsNonStreaming, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const run = await this.createAndRun(body, options); + return await this.runs.poll(run.thread_id, run.id, options); + } + /** * Create a thread and stream the run back */ @@ -340,6 +353,113 @@ export interface ThreadCreateAndRunParamsStreaming extends ThreadCreateAndRunPar stream: true; } +export interface ThreadCreateAndRunPollParams { + /** + * The ID of the + * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + * execute this run. + */ + assistant_id: string; + + /** + * Override the default system message of the assistant. This is useful for + * modifying the behavior on a per-run basis. + */ + instructions?: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + * be used to execute this run. If a value is provided here, it will override the + * model associated with the assistant. If not, the model associated with the + * assistant will be used. + */ + model?: string | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + + /** + * If no thread is provided, an empty thread will be created. 
+ */ + thread?: ThreadCreateAndRunPollParams.Thread; + + /** + * Override the tools the assistant can use for this run. This is useful for + * modifying the behavior on a per-run basis. + */ + tools?: Array< + AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool + > | null; +} + +export namespace ThreadCreateAndRunPollParams { + /** + * If no thread is provided, an empty thread will be created. + */ + export interface Thread { + /** + * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + * start the thread with. + */ + messages?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + } + + export namespace Thread { + export interface Message { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the entity that is creating the message. Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. + */ + role: 'user' | 'assistant'; + + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + * the message should use. There can be a maximum of 10 files attached to a + * message. Useful for tools like `retrieval` and `code_interpreter` that can + * access and use files. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. 
Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + } + } +} + export interface ThreadCreateAndRunStreamParams { /** * The ID of the @@ -455,6 +575,7 @@ export namespace Threads { export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; + export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams; export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; export import Runs = RunsAPI.Runs; export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; @@ -466,10 +587,13 @@ export namespace Threads { export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; export import RunUpdateParams = RunsAPI.RunUpdateParams; export import RunListParams = RunsAPI.RunListParams; + export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams; export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; + export import RunStreamParams = RunsAPI.RunStreamParams; export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; + export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams; export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; export import Messages = MessagesAPI.Messages; export import Annotation = MessagesAPI.Annotation; From 445b795c4ef4c109e69d1e3d74b179f238e5782c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 
2 Apr 2024 00:39:14 +0200 Subject: [PATCH 047/533] release: 4.32.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 24 ++++++++++++++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 29 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 485bcd4e9..a2b09ee37 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.31.0" + ".": "4.32.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6a28f8d3c..3be8b4c02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 4.32.0 (2024-04-01) + +Full Changelog: [v4.31.0...v4.32.0](https://github.com/openai/openai-node/compare/v4.31.0...v4.32.0) + +### Features + +* **api:** add support for filtering messages by run_id ([#747](https://github.com/openai/openai-node/issues/747)) ([9a397ac](https://github.com/openai/openai-node/commit/9a397acffa9f10c3f48e86e3bdb3851770f87b42)) +* **api:** run polling helpers ([#749](https://github.com/openai/openai-node/issues/749)) ([02920ae](https://github.com/openai/openai-node/commit/02920ae082480fc7a7ffe9fa583d053a40dc7120)) + + +### Chores + +* **deps:** remove unused dependency digest-fetch ([#748](https://github.com/openai/openai-node/issues/748)) ([5376837](https://github.com/openai/openai-node/commit/537683734d39dd956a7dcef4339c1167ce6fe13c)) + + +### Documentation + +* **readme:** change undocumented params wording ([#744](https://github.com/openai/openai-node/issues/744)) ([8796691](https://github.com/openai/openai-node/commit/87966911045275db86844dfdcde59653edaef264)) + + +### Refactors + +* rename createAndStream to stream ([02920ae](https://github.com/openai/openai-node/commit/02920ae082480fc7a7ffe9fa583d053a40dc7120)) + ## 4.31.0 (2024-03-30) Full Changelog: [v4.30.0...v4.31.0](https://github.com/openai/openai-node/compare/v4.30.0...v4.31.0) diff --git a/README.md b/README.md index 
1ff9c757d..2adc81afc 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.31.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.32.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index 66639030f..19eefa7c3 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.31.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.32.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 6fb9f1789..11fa0c5e2 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.31.0", + "version": "4.32.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 8eb5423f5..7e04c79b5 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.31.0'; // x-release-please-version +export const VERSION = '4.32.0'; // x-release-please-version From 5b41d1077f219b8feb7557cfab98caf7b5de560d Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:02:43 +0200 Subject: [PATCH 048/533] chore(deps): bump yarn to v1.22.22 (#751) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 11fa0c5e2..e10df6850 100644 --- a/package.json +++ b/package.json @@ -8,7 +8,7 @@ "type": "commonjs", "repository": "github:openai/openai-node", "license": "Apache-2.0", - "packageManager": "yarn@1.22.21", + "packageManager": "yarn@1.22.22", "files": [ "*" ], From b3269eb0cbeb17415de0863f5cb28c4a9f8b643f Mon Sep 17 00:00:00 2001 From: Stainless Bot 
<107565488+stainless-bot@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:03:03 +0200 Subject: [PATCH 049/533] release: 4.32.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a2b09ee37..27308d159 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.32.0" + ".": "4.32.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 3be8b4c02..a1702ad3b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.32.1 (2024-04-02) + +Full Changelog: [v4.32.0...v4.32.1](https://github.com/openai/openai-node/compare/v4.32.0...v4.32.1) + +### Chores + +* **deps:** bump yarn to v1.22.22 ([#751](https://github.com/openai/openai-node/issues/751)) ([5b41d10](https://github.com/openai/openai-node/commit/5b41d1077f219b8feb7557cfab98caf7b5de560d)) + ## 4.32.0 (2024-04-01) Full Changelog: [v4.31.0...v4.32.0](https://github.com/openai/openai-node/compare/v4.31.0...v4.32.0) diff --git a/README.md b/README.md index 2adc81afc..aae0367b6 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.32.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.32.1/mod.ts'; ``` diff --git a/build-deno b/build-deno index 19eefa7c3..a56b6af13 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.32.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.32.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index e10df6850..4d87ed952 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { 
"name": "openai", - "version": "4.32.0", + "version": "4.32.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 7e04c79b5..c2e5453c3 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.32.0'; // x-release-please-version +export const VERSION = '4.32.1'; // x-release-please-version From 2bd3294ed564492def05a61906e02ae5f2aba6c4 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 2 Apr 2024 19:33:02 +0200 Subject: [PATCH 050/533] chore(tests): bump ecosystem tests dependencies (#753) --- .../cloudflare-worker/package-lock.json | 14 ++-- .../node-ts-cjs-auto/package-lock.json | 20 ++--- .../node-ts-cjs-web/package-lock.json | 48 ++++-------- ecosystem-tests/node-ts-cjs/package-lock.json | 48 ++++-------- .../node-ts-esm-auto/package-lock.json | 26 +++---- .../node-ts-esm-web/package-lock.json | 26 +++---- ecosystem-tests/node-ts-esm/package-lock.json | 26 +++---- .../node-ts4.5-jest27/package-lock.json | 16 ++-- .../node-ts4.5-jest27/package.json | 4 +- .../ts-browser-webpack/package-lock.json | 20 ++--- ecosystem-tests/vercel-edge/package-lock.json | 78 ++++++++++--------- ecosystem-tests/vercel-edge/package.json | 4 +- 12 files changed, 152 insertions(+), 178 deletions(-) diff --git a/ecosystem-tests/cloudflare-worker/package-lock.json b/ecosystem-tests/cloudflare-worker/package-lock.json index 7e86792db..dd42f0b36 100644 --- a/ecosystem-tests/cloudflare-worker/package-lock.json +++ b/ecosystem-tests/cloudflare-worker/package-lock.json @@ -5206,9 +5206,9 @@ } }, "node_modules/ts-jest": { - "version": "29.1.1", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", - "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", + "version": "29.1.2", + "resolved": 
"https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", + "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -5224,7 +5224,7 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": "^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", @@ -5261,9 +5261,9 @@ } }, "node_modules/ts-jest/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" diff --git a/ecosystem-tests/node-ts-cjs-auto/package-lock.json b/ecosystem-tests/node-ts-cjs-auto/package-lock.json index a11f9814d..c3880beb2 100644 --- a/ecosystem-tests/node-ts-cjs-auto/package-lock.json +++ b/ecosystem-tests/node-ts-cjs-auto/package-lock.json @@ -1093,22 +1093,22 @@ } }, "node_modules/@types/node": { - "version": "20.11.20", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", - "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "version": "20.11.30", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz", + "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" } }, "node_modules/@types/node-fetch": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.4.tgz", - "integrity": 
"sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==", + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", "dev": true, "dependencies": { "@types/node": "*", - "form-data": "^3.0.0" + "form-data": "^4.0.0" } }, "node_modules/@types/prettier": { @@ -1783,9 +1783,9 @@ } }, "node_modules/form-data": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz", - "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", "dev": true, "dependencies": { "asynckit": "^0.4.0", diff --git a/ecosystem-tests/node-ts-cjs-web/package-lock.json b/ecosystem-tests/node-ts-cjs-web/package-lock.json index cd721ae53..ff6fb3bac 100644 --- a/ecosystem-tests/node-ts-cjs-web/package-lock.json +++ b/ecosystem-tests/node-ts-cjs-web/package-lock.json @@ -1143,13 +1143,13 @@ "dev": true }, "node_modules/@types/node-fetch": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.4.tgz", - "integrity": "sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==", + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", "dev": true, "dependencies": { "@types/node": "*", - "form-data": "^3.0.0" + "form-data": "^4.0.0" } }, "node_modules/@types/stack-utils": { @@ -2082,9 +2082,9 @@ } }, 
"node_modules/form-data": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz", - "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", "dev": true, "dependencies": { "asynckit": "^0.4.0", @@ -3208,20 +3208,6 @@ } } }, - "node_modules/jsdom/node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dev": true, - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/jsdom/node_modules/tr46": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz", @@ -4150,9 +4136,9 @@ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" }, "node_modules/ts-jest": { - "version": "29.1.1", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", - "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", + "version": "29.1.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", + "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -4168,7 +4154,7 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": "^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", @@ -4205,9 +4191,9 
@@ } }, "node_modules/ts-jest/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -4391,9 +4377,9 @@ } }, "node_modules/whatwg-fetch": { - "version": "3.6.19", - "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.19.tgz", - "integrity": "sha512-d67JP4dHSbm2TrpFj8AbO8DnL1JXL5J9u0Kq2xW6d0TFDbCA3Muhdt8orXC22utleTVj7Prqt82baN6RBvnEgw==", + "version": "3.6.20", + "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", + "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==", "dev": true }, "node_modules/whatwg-mimetype": { diff --git a/ecosystem-tests/node-ts-cjs/package-lock.json b/ecosystem-tests/node-ts-cjs/package-lock.json index c5280c5b5..c9493b515 100644 --- a/ecosystem-tests/node-ts-cjs/package-lock.json +++ b/ecosystem-tests/node-ts-cjs/package-lock.json @@ -1135,22 +1135,22 @@ } }, "node_modules/@types/node": { - "version": "20.11.20", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", - "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "version": "20.11.30", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz", + "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" } }, "node_modules/@types/node-fetch": { - "version": "2.6.4", - "resolved": 
"https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.4.tgz", - "integrity": "sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==", + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", "dev": true, "dependencies": { "@types/node": "*", - "form-data": "^3.0.0" + "form-data": "^4.0.0" } }, "node_modules/@types/stack-utils": { @@ -2069,9 +2069,9 @@ } }, "node_modules/form-data": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz", - "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", "dev": true, "dependencies": { "asynckit": "^0.4.0", @@ -3175,20 +3175,6 @@ } } }, - "node_modules/jsdom/node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dev": true, - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/jsdom/node_modules/tr46": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz", @@ -4117,9 +4103,9 @@ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" }, "node_modules/ts-jest": { - "version": "29.1.1", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", - "integrity": 
"sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", + "version": "29.1.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", + "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -4135,7 +4121,7 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": "^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", @@ -4172,9 +4158,9 @@ } }, "node_modules/ts-jest/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" diff --git a/ecosystem-tests/node-ts-esm-auto/package-lock.json b/ecosystem-tests/node-ts-esm-auto/package-lock.json index 4bce04f80..3e4438d05 100644 --- a/ecosystem-tests/node-ts-esm-auto/package-lock.json +++ b/ecosystem-tests/node-ts-esm-auto/package-lock.json @@ -1157,9 +1157,9 @@ } }, "node_modules/@types/node": { - "version": "20.11.20", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", - "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "version": "20.11.30", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz", + "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -3663,9 +3663,9 @@ } }, 
"node_modules/ts-jest": { - "version": "29.1.1", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", - "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", + "version": "29.1.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", + "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -3681,7 +3681,7 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": "^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", @@ -3718,9 +3718,9 @@ } }, "node_modules/ts-jest/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -3739,9 +3739,9 @@ "dev": true }, "node_modules/ts-node": { - "version": "10.9.1", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", - "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", "dev": true, "dependencies": { "@cspotcode/source-map-support": "^0.8.0", diff --git a/ecosystem-tests/node-ts-esm-web/package-lock.json b/ecosystem-tests/node-ts-esm-web/package-lock.json index b96128a4e..118bf0909 
100644 --- a/ecosystem-tests/node-ts-esm-web/package-lock.json +++ b/ecosystem-tests/node-ts-esm-web/package-lock.json @@ -1157,9 +1157,9 @@ } }, "node_modules/@types/node": { - "version": "20.11.20", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", - "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "version": "20.11.30", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz", + "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -3663,9 +3663,9 @@ } }, "node_modules/ts-jest": { - "version": "29.1.1", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", - "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", + "version": "29.1.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", + "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -3681,7 +3681,7 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": "^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", @@ -3718,9 +3718,9 @@ } }, "node_modules/ts-jest/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -3739,9 
+3739,9 @@ "dev": true }, "node_modules/ts-node": { - "version": "10.9.1", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", - "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", "dev": true, "dependencies": { "@cspotcode/source-map-support": "^0.8.0", diff --git a/ecosystem-tests/node-ts-esm/package-lock.json b/ecosystem-tests/node-ts-esm/package-lock.json index 4aecff6ca..cb5b8eaa8 100644 --- a/ecosystem-tests/node-ts-esm/package-lock.json +++ b/ecosystem-tests/node-ts-esm/package-lock.json @@ -1157,9 +1157,9 @@ } }, "node_modules/@types/node": { - "version": "20.11.20", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", - "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "version": "20.11.30", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz", + "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -3663,9 +3663,9 @@ } }, "node_modules/ts-jest": { - "version": "29.1.1", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", - "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", + "version": "29.1.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", + "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -3681,7 +3681,7 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^14.15.0 || 
^16.10.0 || >=18.0.0" + "node": "^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", @@ -3718,9 +3718,9 @@ } }, "node_modules/ts-jest/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -3739,9 +3739,9 @@ "dev": true }, "node_modules/ts-node": { - "version": "10.9.1", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", - "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", "dev": true, "dependencies": { "@cspotcode/source-map-support": "^0.8.0", diff --git a/ecosystem-tests/node-ts4.5-jest27/package-lock.json b/ecosystem-tests/node-ts4.5-jest27/package-lock.json index 76813597f..bedd114f8 100644 --- a/ecosystem-tests/node-ts4.5-jest27/package-lock.json +++ b/ecosystem-tests/node-ts4.5-jest27/package-lock.json @@ -14,13 +14,13 @@ }, "devDependencies": { "@types/jest": "27.5.2", - "@types/node": "^20.4.2", + "@types/node": "20.11.20", "@types/node-fetch": "^2.6.1", "@types/ws": "^8.5.4", "fastest-levenshtein": "^1.0.16", "jest": "27.5.1", "ts-jest": "27.1.5", - "typescript": "4.5.4" + "typescript": "4.5.5" } }, "node_modules/@ampproject/remapping": { @@ -1077,9 +1077,9 @@ } }, "node_modules/@types/node-fetch": { - "version": "2.6.5", - "resolved": 
"https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.5.tgz", - "integrity": "sha512-OZsUlr2nxvkqUFLSaY2ZbA+P1q22q+KrlxWOn/38RX+u5kTkYL2mTujEpzUhGkS+K/QCYp9oagfXG39XOzyySg==", + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", "dev": true, "dependencies": { "@types/node": "*", @@ -4108,9 +4108,9 @@ } }, "node_modules/typescript": { - "version": "4.5.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.4.tgz", - "integrity": "sha512-VgYs2A2QIRuGphtzFV7aQJduJ2gyfTljngLzjpfW9FoYZF6xuw1W0vW9ghCKLfcWrCFxK81CSGRAvS1pn4fIUg==", + "version": "4.5.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.5.tgz", + "integrity": "sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA==", "dev": true, "bin": { "tsc": "bin/tsc", diff --git a/ecosystem-tests/node-ts4.5-jest27/package.json b/ecosystem-tests/node-ts4.5-jest27/package.json index 1740acae8..ae76bcc9c 100644 --- a/ecosystem-tests/node-ts4.5-jest27/package.json +++ b/ecosystem-tests/node-ts4.5-jest27/package.json @@ -13,13 +13,13 @@ "tsconfig-paths": "^4.0.0" }, "devDependencies": { - "@types/node": "^20.4.2", + "@types/node": "20.11.20", "@types/node-fetch": "^2.6.1", "@types/jest": "27.5.2", "@types/ws": "^8.5.4", "fastest-levenshtein": "^1.0.16", "jest": "27.5.1", "ts-jest": "27.1.5", - "typescript": "4.5.4" + "typescript": "4.5.5" } } diff --git a/ecosystem-tests/ts-browser-webpack/package-lock.json b/ecosystem-tests/ts-browser-webpack/package-lock.json index b8f507e9b..686d0c2f9 100644 --- a/ecosystem-tests/ts-browser-webpack/package-lock.json +++ b/ecosystem-tests/ts-browser-webpack/package-lock.json @@ -6604,9 +6604,9 @@ "dev": true }, "node_modules/ts-node": { - "version": "10.9.1", - "resolved": 
"https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", - "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", "dev": true, "dependencies": { "@cspotcode/source-map-support": "^0.8.0", @@ -6978,9 +6978,9 @@ } }, "node_modules/webpack-dev-middleware": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", - "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", "dev": true, "dependencies": { "colorette": "^2.0.10", @@ -7001,9 +7001,9 @@ } }, "node_modules/webpack-dev-server": { - "version": "4.15.1", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.1.tgz", - "integrity": "sha512-5hbAst3h3C3L8w6W4P96L5vaV0PxSmJhxZvWKYIdgxOQm8pNZ5dEOmmSLBVpP85ReeyRt6AS1QJNyo/oFFPeVA==", + "version": "4.15.2", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", + "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", "dev": true, "dependencies": { "@types/bonjour": "^3.5.9", @@ -7034,7 +7034,7 @@ "serve-index": "^1.9.1", "sockjs": "^0.3.24", "spdy": "^4.0.2", - "webpack-dev-middleware": "^5.3.1", + "webpack-dev-middleware": "^5.3.4", "ws": "^8.13.0" }, "bin": { diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index 
ebac7eb81..fdfe2952d 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -15,8 +15,8 @@ }, "devDependencies": { "@types/node": "20.3.3", - "@types/react": "18.2.58", - "@types/react-dom": "18.2.19", + "@types/react": "18.2.74", + "@types/react-dom": "18.2.23", "edge-runtime": "^2.4.3", "fastest-levenshtein": "^1.0.16", "jest": "^29.5.0", @@ -730,9 +730,9 @@ } }, "node_modules/@edge-runtime/format": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@edge-runtime/format/-/format-2.2.0.tgz", - "integrity": "sha512-gPrS6AVw/qJJL0vcxMXv4kFXCU3ZTCD1uuJpwX15YxHV8BgU9OG5v9LrkkXcr96PBT/9epypfNJMhlWADuEziw==", + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@edge-runtime/format/-/format-2.2.1.tgz", + "integrity": "sha512-JQTRVuiusQLNNLe2W9tnzBlV/GvSVcozLl4XZHk5swnRZ/v6jp8TqR8P7sqmJsQqblDZ3EztcWmLDbhRje/+8g==", "dev": true, "engines": { "node": ">=16" @@ -747,22 +747,31 @@ "node": ">=14" } }, + "node_modules/@edge-runtime/ponyfill": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/@edge-runtime/ponyfill/-/ponyfill-2.4.2.tgz", + "integrity": "sha512-oN17GjFr69chu6sDLvXxdhg0Qe8EZviGSuqzR9qOiKh4MhFYGdBBcqRNzdmYeAdeRzOW2mM9yil4RftUQ7sUOA==", + "dev": true, + "engines": { + "node": ">=16" + } + }, "node_modules/@edge-runtime/primitives": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@edge-runtime/primitives/-/primitives-3.1.0.tgz", - "integrity": "sha512-yxr1QM/lC8nrU38zxePeDqVeIjwsJ83gKGTH8YJ4CoHTv3q+6xEeqRIT+/9IPX/FApWYtnxHauhNqr6CHRj5YA==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@edge-runtime/primitives/-/primitives-4.1.0.tgz", + "integrity": "sha512-Vw0lbJ2lvRUqc7/soqygUX216Xb8T3WBZ987oywz6aJqRxcwSVWwr9e+Nqo2m9bxobA9mdbWNNoRY6S9eko1EQ==", "dev": true, "engines": { "node": ">=16" } }, "node_modules/@edge-runtime/vm": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/@edge-runtime/vm/-/vm-3.1.0.tgz", - "integrity": "sha512-Y2JZgJP+4byI17SiDeEZhvBUvJ+om7E5ll/jrS7aGRpet5qKnJSsGep6xxhMjqT/j8ulFvTMN/kdlMMy5pEKBQ==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@edge-runtime/vm/-/vm-3.2.0.tgz", + "integrity": "sha512-0dEVyRLM/lG4gp1R/Ik5bfPl/1wX00xFwd5KcNH602tzBa09oF7pbTKETEhR1GjZ75K6OJnYFu8II2dyMhONMw==", "dev": true, "dependencies": { - "@edge-runtime/primitives": "3.1.0" + "@edge-runtime/primitives": "4.1.0" }, "engines": { "node": ">=16" @@ -1562,31 +1571,24 @@ "dev": true }, "node_modules/@types/react": { - "version": "18.2.58", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.58.tgz", - "integrity": "sha512-TaGvMNhxvG2Q0K0aYxiKfNDS5m5ZsoIBBbtfUorxdH4NGSXIlYvZxLJI+9Dd3KjeB3780bciLyAb7ylO8pLhPw==", + "version": "18.2.74", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.74.tgz", + "integrity": "sha512-9AEqNZZyBx8OdZpxzQlaFEVCSFUM2YXJH46yPOiOpm078k6ZLOCcuAzGum/zK8YBwY+dbahVNbHrbgrAwIRlqw==", "dev": true, "dependencies": { "@types/prop-types": "*", - "@types/scheduler": "*", "csstype": "^3.0.2" } }, "node_modules/@types/react-dom": { - "version": "18.2.19", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz", - "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==", + "version": "18.2.23", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.23.tgz", + "integrity": "sha512-ZQ71wgGOTmDYpnav2knkjr3qXdAFu0vsk8Ci5w3pGAIdj7/kKAyn+VsQDhXsmzzzepAiI9leWMmubXz690AI/A==", "dev": true, "dependencies": { "@types/react": "*" } }, - "node_modules/@types/scheduler": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", - "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==", - 
"dev": true - }, "node_modules/@types/stack-utils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", @@ -2940,17 +2942,17 @@ "dev": true }, "node_modules/edge-runtime": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/edge-runtime/-/edge-runtime-2.5.0.tgz", - "integrity": "sha512-QgDNX6R+RPwhY3+vqHpvYE4XUoB/cFG60nGBKu9pmPOJxQleeTCj2F5CHimIpNqex9h1Cy2Y3tuQ+Vq2GzmZIA==", + "version": "2.5.9", + "resolved": "https://registry.npmjs.org/edge-runtime/-/edge-runtime-2.5.9.tgz", + "integrity": "sha512-pk+k0oK0PVXdlT4oRp4lwh+unuKB7Ng4iZ2HB+EZ7QCEQizX360Rp/F4aRpgpRgdP2ufB35N+1KppHmYjqIGSg==", "dev": true, "dependencies": { - "@edge-runtime/format": "2.2.0", - "@edge-runtime/vm": "3.1.0", + "@edge-runtime/format": "2.2.1", + "@edge-runtime/ponyfill": "2.4.2", + "@edge-runtime/vm": "3.2.0", "async-listen": "3.0.1", "mri": "1.2.0", "picocolors": "1.0.0", - "pretty-bytes": "5.6.0", "pretty-ms": "7.0.1", "signal-exit": "4.0.2", "time-span": "4.0.0" @@ -6249,9 +6251,9 @@ "dev": true }, "node_modules/ts-jest": { - "version": "29.1.1", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz", - "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==", + "version": "29.1.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz", + "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==", "dev": true, "dependencies": { "bs-logger": "0.x", @@ -6267,7 +6269,7 @@ "ts-jest": "cli.js" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": "^16.10.0 || ^18.0.0 || >=20.0.0" }, "peerDependencies": { "@babel/core": ">=7.0.0-beta.0 <8", @@ -6292,9 +6294,9 @@ } }, "node_modules/ts-jest/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": 
"sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 171ba9c1a..48223796c 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -21,8 +21,8 @@ }, "devDependencies": { "@types/node": "20.3.3", - "@types/react": "18.2.58", - "@types/react-dom": "18.2.19", + "@types/react": "18.2.74", + "@types/react-dom": "18.2.23", "edge-runtime": "^2.4.3", "fastest-levenshtein": "^1.0.16", "jest": "^29.5.0", From c5eb4eaf7f7422bb3f9745d85c908d238d065fb3 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 2 Apr 2024 19:57:39 +0200 Subject: [PATCH 051/533] fix(tests): update wrangler to v3.19.0 (CVE-2023-7080) (#755) --- .../cloudflare-worker/package-lock.json | 750 ++++-------------- 1 file changed, 146 insertions(+), 604 deletions(-) diff --git a/ecosystem-tests/cloudflare-worker/package-lock.json b/ecosystem-tests/cloudflare-worker/package-lock.json index dd42f0b36..0673bb27c 100644 --- a/ecosystem-tests/cloudflare-worker/package-lock.json +++ b/ecosystem-tests/cloudflare-worker/package-lock.json @@ -671,9 +671,9 @@ } }, "node_modules/@cloudflare/workerd-darwin-64": { - "version": "1.20230814.1", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20230814.1.tgz", - "integrity": "sha512-aQUO7q7qXl+SVtOiMMlVKLNOSeL6GX43RKeflwzsD74dGgyHPiSfw5KCvXhkVbyN7u+yYF6HyFdaIvHLfn5jyA==", + "version": "1.20231030.0", + "resolved": 
"https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20231030.0.tgz", + "integrity": "sha512-J4PQ9utPxLya9yHdMMx3AZeC5M/6FxcoYw6jo9jbDDFTy+a4Gslqf4Im9We3aeOEdPXa3tgQHVQOSelJSZLhIw==", "cpu": [ "x64" ], @@ -687,9 +687,9 @@ } }, "node_modules/@cloudflare/workerd-darwin-arm64": { - "version": "1.20230814.1", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20230814.1.tgz", - "integrity": "sha512-U2mcgi+AiuI/4EY5Wk/GmygiNoCNw/V2mcHmxESqe4r6XbJYOzBdEsjnqJ05rqd0JlEM8m64jRtE6/qBnQHygg==", + "version": "1.20231030.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20231030.0.tgz", + "integrity": "sha512-WSJJjm11Del4hSneiNB7wTXGtBXI4QMCH9l5qf4iT5PAW8cESGcCmdHtWDWDtGAAGcvmLT04KNvmum92vRKKQQ==", "cpu": [ "arm64" ], @@ -703,9 +703,9 @@ } }, "node_modules/@cloudflare/workerd-linux-64": { - "version": "1.20230814.1", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20230814.1.tgz", - "integrity": "sha512-Q4kITXLTCuG2i2Z01fbb5AjVRRIf3+lS4ZVsFbTbIwtcOOG4Ozcw7ee7tKsFES7hFqR4Eg9gMG4/aS0mmi+L2g==", + "version": "1.20231030.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20231030.0.tgz", + "integrity": "sha512-2HUeRTvoCC17fxE0qdBeR7J9dO8j4A8ZbdcvY8pZxdk+zERU6+N03RTbk/dQMU488PwiDvcC3zZqS4gwLfVT8g==", "cpu": [ "x64" ], @@ -719,9 +719,9 @@ } }, "node_modules/@cloudflare/workerd-linux-arm64": { - "version": "1.20230814.1", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20230814.1.tgz", - "integrity": "sha512-BX5SaksXw+pkREVw3Rw2eSNXplqZw+14CcwW/5x/4oq/C6yn5qCvKxJfM7pukJGMI4wkJPOYops7B3g27FB/HA==", + "version": "1.20231030.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20231030.0.tgz", + "integrity": 
"sha512-4/GK5zHh+9JbUI6Z5xTCM0ZmpKKHk7vu9thmHjUxtz+o8Ne9DoD7DlDvXQWgMF6XGaTubDWyp3ttn+Qv8jDFuQ==", "cpu": [ "arm64" ], @@ -735,9 +735,9 @@ } }, "node_modules/@cloudflare/workerd-windows-64": { - "version": "1.20230814.1", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20230814.1.tgz", - "integrity": "sha512-GWHqfyhsG/1wm2W8afkYX3q3fWXUWWD8NGtHfAs6ZVTHdW3mmYyMhKR0lc6ptBwz5i5aXRlP2S+CxxxwwDbKpw==", + "version": "1.20231030.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20231030.0.tgz", + "integrity": "sha512-fb/Jgj8Yqy3PO1jLhk7mTrHMkR8jklpbQFud6rL/aMAn5d6MQbaSrYOCjzkKGp0Zng8D2LIzSl+Fc0C9Sggxjg==", "cpu": [ "x64" ], @@ -757,18 +757,18 @@ "dev": true }, "node_modules/@esbuild-plugins/node-globals-polyfill": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-globals-polyfill/-/node-globals-polyfill-0.1.1.tgz", - "integrity": "sha512-MR0oAA+mlnJWrt1RQVQ+4VYuRJW/P2YmRTv1AsplObyvuBMnPHiizUF95HHYiSsMGLhyGtWufaq2XQg6+iurBg==", + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-globals-polyfill/-/node-globals-polyfill-0.2.3.tgz", + "integrity": "sha512-r3MIryXDeXDOZh7ih1l/yE9ZLORCd5e8vWg02azWRGj5SPTuoh69A2AIyn0Z31V/kHBfZ4HgWJ+OK3GTTwLmnw==", "dev": true, "peerDependencies": { "esbuild": "*" } }, "node_modules/@esbuild-plugins/node-modules-polyfill": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-modules-polyfill/-/node-modules-polyfill-0.1.4.tgz", - "integrity": "sha512-uZbcXi0zbmKC/050p3gJnne5Qdzw8vkXIv+c2BW0Lsc1ji1SkrxbKPUy5Efr0blbTu1SL8w4eyfpnSdPg3G0Qg==", + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-modules-polyfill/-/node-modules-polyfill-0.2.2.tgz", + "integrity": "sha512-LXV7QsWJxRuMYvKbiznh+U1ilIop3g2TeKRzUxOG5X3YITc8JyyTa90BmLwqqv0YnX4v32CSlG+vsziZp9dMvA==", 
"dev": true, "dependencies": { "escape-string-regexp": "^4.0.0", @@ -791,9 +791,9 @@ } }, "node_modules/@esbuild/android-arm": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.16.3.tgz", - "integrity": "sha512-mueuEoh+s1eRbSJqq9KNBQwI4QhQV6sRXIfTyLXSHGMpyew61rOK4qY21uKbXl1iBoMb0AdL1deWFCQVlN2qHA==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.17.19.tgz", + "integrity": "sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==", "cpu": [ "arm" ], @@ -807,9 +807,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.16.3.tgz", - "integrity": "sha512-RolFVeinkeraDvN/OoRf1F/lP0KUfGNb5jxy/vkIMeRRChkrX/HTYN6TYZosRJs3a1+8wqpxAo5PI5hFmxyPRg==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.17.19.tgz", + "integrity": "sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==", "cpu": [ "arm64" ], @@ -823,9 +823,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.16.3.tgz", - "integrity": "sha512-SFpTUcIT1bIJuCCBMCQWq1bL2gPTjWoLZdjmIhjdcQHaUfV41OQfho6Ici5uvvkMmZRXIUGpM3GxysP/EU7ifQ==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.17.19.tgz", + "integrity": "sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==", "cpu": [ "x64" ], @@ -839,9 +839,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.16.3.tgz", - "integrity": 
"sha512-DO8WykMyB+N9mIDfI/Hug70Dk1KipavlGAecxS3jDUwAbTpDXj0Lcwzw9svkhxfpCagDmpaTMgxWK8/C/XcXvw==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz", + "integrity": "sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==", "cpu": [ "arm64" ], @@ -855,9 +855,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.16.3.tgz", - "integrity": "sha512-uEqZQ2omc6BvWqdCiyZ5+XmxuHEi1SPzpVxXCSSV2+Sh7sbXbpeNhHIeFrIpRjAs0lI1FmA1iIOxFozKBhKgRQ==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.17.19.tgz", + "integrity": "sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==", "cpu": [ "x64" ], @@ -871,9 +871,9 @@ } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.16.3.tgz", - "integrity": "sha512-nJansp3sSXakNkOD5i5mIz2Is/HjzIhFs49b1tjrPrpCmwgBmH9SSzhC/Z1UqlkivqMYkhfPwMw1dGFUuwmXhw==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.19.tgz", + "integrity": "sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==", "cpu": [ "arm64" ], @@ -887,9 +887,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.16.3.tgz", - "integrity": "sha512-TfoDzLw+QHfc4a8aKtGSQ96Wa+6eimljjkq9HKR0rHlU83vw8aldMOUSJTUDxbcUdcgnJzPaX8/vGWm7vyV7ug==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.17.19.tgz", + "integrity": 
"sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==", "cpu": [ "x64" ], @@ -903,9 +903,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.16.3.tgz", - "integrity": "sha512-VwswmSYwVAAq6LysV59Fyqk3UIjbhuc6wb3vEcJ7HEJUtFuLK9uXWuFoH1lulEbE4+5GjtHi3MHX+w1gNHdOWQ==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.17.19.tgz", + "integrity": "sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==", "cpu": [ "arm" ], @@ -919,9 +919,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.16.3.tgz", - "integrity": "sha512-7I3RlsnxEFCHVZNBLb2w7unamgZ5sVwO0/ikE2GaYvYuUQs9Qte/w7TqWcXHtCwxvZx/2+F97ndiUQAWs47ZfQ==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.17.19.tgz", + "integrity": "sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==", "cpu": [ "arm64" ], @@ -935,9 +935,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.16.3.tgz", - "integrity": "sha512-X8FDDxM9cqda2rJE+iblQhIMYY49LfvW4kaEjoFbTTQ4Go8G96Smj2w3BRTwA8IHGoi9dPOPGAX63dhuv19UqA==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.17.19.tgz", + "integrity": "sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==", "cpu": [ "ia32" ], @@ -951,9 +951,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.16.3.tgz", - "integrity": 
"sha512-hIbeejCOyO0X9ujfIIOKjBjNAs9XD/YdJ9JXAy1lHA+8UXuOqbFe4ErMCqMr8dhlMGBuvcQYGF7+kO7waj2KHw==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.17.19.tgz", + "integrity": "sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==", "cpu": [ "loong64" ], @@ -967,9 +967,9 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.16.3.tgz", - "integrity": "sha512-znFRzICT/V8VZQMt6rjb21MtAVJv/3dmKRMlohlShrbVXdBuOdDrGb+C2cZGQAR8RFyRe7HS6klmHq103WpmVw==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.17.19.tgz", + "integrity": "sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==", "cpu": [ "mips64el" ], @@ -983,9 +983,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.16.3.tgz", - "integrity": "sha512-EV7LuEybxhXrVTDpbqWF2yehYRNz5e5p+u3oQUS2+ZFpknyi1NXxr8URk4ykR8Efm7iu04//4sBg249yNOwy5Q==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.17.19.tgz", + "integrity": "sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==", "cpu": [ "ppc64" ], @@ -999,9 +999,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.16.3.tgz", - "integrity": "sha512-uDxqFOcLzFIJ+r/pkTTSE9lsCEaV/Y6rMlQjUI9BkzASEChYL/aSQjZjchtEmdnVxDKETnUAmsaZ4pqK1eE5BQ==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.17.19.tgz", + "integrity": 
"sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==", "cpu": [ "riscv64" ], @@ -1015,9 +1015,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.16.3.tgz", - "integrity": "sha512-NbeREhzSxYwFhnCAQOQZmajsPYtX71Ufej3IQ8W2Gxskfz9DK58ENEju4SbpIj48VenktRASC52N5Fhyf/aliQ==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.17.19.tgz", + "integrity": "sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==", "cpu": [ "s390x" ], @@ -1031,9 +1031,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.16.3.tgz", - "integrity": "sha512-SDiG0nCixYO9JgpehoKgScwic7vXXndfasjnD5DLbp1xltANzqZ425l7LSdHynt19UWOcDjG9wJJzSElsPvk0w==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.17.19.tgz", + "integrity": "sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==", "cpu": [ "x64" ], @@ -1047,9 +1047,9 @@ } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.16.3.tgz", - "integrity": "sha512-AzbsJqiHEq1I/tUvOfAzCY15h4/7Ivp3ff/o1GpP16n48JMNAtbW0qui2WCgoIZArEHD0SUQ95gvR0oSO7ZbdA==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.17.19.tgz", + "integrity": "sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==", "cpu": [ "x64" ], @@ -1063,9 +1063,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.16.3.tgz", - "integrity": 
"sha512-gSABi8qHl8k3Cbi/4toAzHiykuBuWLZs43JomTcXkjMZVkp0gj3gg9mO+9HJW/8GB5H89RX/V0QP4JGL7YEEVg==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.17.19.tgz", + "integrity": "sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==", "cpu": [ "x64" ], @@ -1079,9 +1079,9 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.16.3.tgz", - "integrity": "sha512-SF9Kch5Ete4reovvRO6yNjMxrvlfT0F0Flm+NPoUw5Z4Q3r1d23LFTgaLwm3Cp0iGbrU/MoUI+ZqwCv5XJijCw==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.17.19.tgz", + "integrity": "sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==", "cpu": [ "x64" ], @@ -1095,9 +1095,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.16.3.tgz", - "integrity": "sha512-u5aBonZIyGopAZyOnoPAA6fGsDeHByZ9CnEzyML9NqntK6D/xl5jteZUKm/p6nD09+v3pTM6TuUIqSPcChk5gg==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.17.19.tgz", + "integrity": "sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==", "cpu": [ "arm64" ], @@ -1111,9 +1111,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.16.3.tgz", - "integrity": "sha512-GlgVq1WpvOEhNioh74TKelwla9KDuAaLZrdxuuUgsP2vayxeLgVc+rbpIv0IYF4+tlIzq2vRhofV+KGLD+37EQ==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.17.19.tgz", + "integrity": "sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==", 
"cpu": [ "ia32" ], @@ -1127,9 +1127,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.16.3.tgz", - "integrity": "sha512-5/JuTd8OWW8UzEtyf19fbrtMJENza+C9JoPIkvItgTBQ1FO2ZLvjbPO6Xs54vk0s5JB5QsfieUEshRQfu7ZHow==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.17.19.tgz", + "integrity": "sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==", "cpu": [ "x64" ], @@ -1881,37 +1881,6 @@ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/better-sqlite3": { - "version": "8.5.2", - "resolved": "https://registry.npmjs.org/better-sqlite3/-/better-sqlite3-8.5.2.tgz", - "integrity": "sha512-w/EZ/jwuZF+/47mAVC2+rhR2X/gwkZ+fd1pbX7Y90D5NRaRzDQcxrHY10t6ijGiYIonCVsBSF5v1cay07bP5sg==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.5.0", - "prebuild-install": "^7.1.0" - } - }, "node_modules/binary-extensions": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", @@ -1921,26 +1890,6 @@ "node": ">=8" } }, - "node_modules/bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": 
"sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", - "dev": true, - "dependencies": { - "file-uri-to-path": "1.0.0" - } - }, - "node_modules/bl": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", - "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", - "dev": true, - "dependencies": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, "node_modules/blake3-wasm": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/blake3-wasm/-/blake3-wasm-2.1.5.tgz", @@ -2028,30 +1977,6 @@ "node-int64": "^0.4.0" } }, - "node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, "node_modules/buffer-from": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", @@ -2179,12 +2104,6 @@ "fsevents": "~2.3.2" } }, - "node_modules/chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", - "dev": true - }, "node_modules/ci-info": { "version": "3.8.0", "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", @@ -2347,21 +2266,6 @@ } } }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": 
"https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "dev": true, - "dependencies": { - "mimic-response": "^3.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/dedent": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz", @@ -2376,15 +2280,6 @@ } } }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "dev": true, - "engines": { - "node": ">=4.0.0" - } - }, "node_modules/deepmerge": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", @@ -2403,15 +2298,6 @@ "node": ">=0.4.0" } }, - "node_modules/detect-libc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", - "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/detect-newline": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", @@ -2460,15 +2346,6 @@ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "dev": true }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "dependencies": { - "once": "^1.4.0" - } - }, "node_modules/error-ex": 
{ "version": "1.3.2", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", @@ -2479,9 +2356,9 @@ } }, "node_modules/esbuild": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.16.3.tgz", - "integrity": "sha512-71f7EjPWTiSguen8X/kxEpkAS7BFHwtQKisCDDV3Y4GLGWBaoSCyD5uXkaUew6JDzA9FEN1W23mdnSwW9kqCeg==", + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.17.19.tgz", + "integrity": "sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==", "dev": true, "hasInstallScript": true, "bin": { @@ -2491,28 +2368,28 @@ "node": ">=12" }, "optionalDependencies": { - "@esbuild/android-arm": "0.16.3", - "@esbuild/android-arm64": "0.16.3", - "@esbuild/android-x64": "0.16.3", - "@esbuild/darwin-arm64": "0.16.3", - "@esbuild/darwin-x64": "0.16.3", - "@esbuild/freebsd-arm64": "0.16.3", - "@esbuild/freebsd-x64": "0.16.3", - "@esbuild/linux-arm": "0.16.3", - "@esbuild/linux-arm64": "0.16.3", - "@esbuild/linux-ia32": "0.16.3", - "@esbuild/linux-loong64": "0.16.3", - "@esbuild/linux-mips64el": "0.16.3", - "@esbuild/linux-ppc64": "0.16.3", - "@esbuild/linux-riscv64": "0.16.3", - "@esbuild/linux-s390x": "0.16.3", - "@esbuild/linux-x64": "0.16.3", - "@esbuild/netbsd-x64": "0.16.3", - "@esbuild/openbsd-x64": "0.16.3", - "@esbuild/sunos-x64": "0.16.3", - "@esbuild/win32-arm64": "0.16.3", - "@esbuild/win32-ia32": "0.16.3", - "@esbuild/win32-x64": "0.16.3" + "@esbuild/android-arm": "0.17.19", + "@esbuild/android-arm64": "0.17.19", + "@esbuild/android-x64": "0.17.19", + "@esbuild/darwin-arm64": "0.17.19", + "@esbuild/darwin-x64": "0.17.19", + "@esbuild/freebsd-arm64": "0.17.19", + "@esbuild/freebsd-x64": "0.17.19", + "@esbuild/linux-arm": "0.17.19", + "@esbuild/linux-arm64": "0.17.19", + "@esbuild/linux-ia32": "0.17.19", + "@esbuild/linux-loong64": "0.17.19", + "@esbuild/linux-mips64el": "0.17.19", + "@esbuild/linux-ppc64": "0.17.19", + 
"@esbuild/linux-riscv64": "0.17.19", + "@esbuild/linux-s390x": "0.17.19", + "@esbuild/linux-x64": "0.17.19", + "@esbuild/netbsd-x64": "0.17.19", + "@esbuild/openbsd-x64": "0.17.19", + "@esbuild/sunos-x64": "0.17.19", + "@esbuild/win32-arm64": "0.17.19", + "@esbuild/win32-ia32": "0.17.19", + "@esbuild/win32-x64": "0.17.19" } }, "node_modules/escalade": { @@ -2611,15 +2488,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/expand-template": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", - "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, "node_modules/expect": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", @@ -2682,12 +2550,6 @@ "node": "^12.20 || >= 14.13" } }, - "node_modules/file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", - "dev": true - }, "node_modules/fill-range": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", @@ -2764,12 +2626,6 @@ "integrity": "sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g==", "dev": true }, - "node_modules/fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", - "dev": true - }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -2854,12 +2710,6 @@ "url": 
"https://github.com/sponsors/sindresorhus" } }, - "node_modules/github-from-package": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", - "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", - "dev": true - }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", @@ -2940,12 +2790,6 @@ "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", "dev": true }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", - "dev": true - }, "node_modules/human-signals": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", @@ -2955,26 +2799,6 @@ "node": ">=10.17.0" } }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, "node_modules/import-local": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", @@ -3019,12 +2843,6 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "dev": true }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": 
"https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true - }, "node_modules/is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", @@ -4075,54 +3893,32 @@ "node": ">=6" } }, - "node_modules/mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/miniflare": { - "version": "3.20230814.1", - "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-3.20230814.1.tgz", - "integrity": "sha512-LMgqd1Ut0+fnlvQepVbbBYQczQnyuuap8bgUwOyPETka0S9NR9NxMQSNaBgVZ0uOaG7xMJ/OVTRlz+TGB86PWA==", + "version": "3.20231030.3", + "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-3.20231030.3.tgz", + "integrity": "sha512-lquHSh0XiO8uoWDujOLHtDS9mkUTJTc5C5amiQ6A++5y0f+DWiMqbDBvvwjlYf4Dvqk6ChFya9dztk7fg2ZVxA==", "dev": true, "dependencies": { "acorn": "^8.8.0", "acorn-walk": "^8.2.0", - "better-sqlite3": "^8.1.0", "capnp-ts": "^0.7.0", "exit-hook": "^2.2.1", "glob-to-regexp": "^0.4.1", - "http-cache-semantics": "^4.1.0", - "kleur": "^4.1.5", - "set-cookie-parser": "^2.6.0", "source-map-support": "0.5.21", "stoppable": "^1.1.0", - "undici": "^5.13.0", - "workerd": "1.20230814.1", + "undici": "^5.22.1", + "workerd": "1.20231030.0", "ws": "^8.11.0", "youch": "^3.2.2", "zod": "^3.20.6" }, + "bin": { + "miniflare": "bootstrap.js" + }, "engines": { "node": ">=16.13" } }, - "node_modules/miniflare/node_modules/kleur": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", - "integrity": 
"sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, "node_modules/miniflare/node_modules/source-map-support": { "version": "0.5.21", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", @@ -4154,12 +3950,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/mkdirp-classic": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", - "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", - "dev": true - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -4193,63 +3983,12 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, - "node_modules/napi-build-utils": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz", - "integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg==", - "dev": true - }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", "dev": true }, - "node_modules/node-abi": { - "version": "3.47.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.47.0.tgz", - "integrity": "sha512-2s6B2CWZM//kPgwnuI0KrYwNjfdByE25zvAaEpq9IH4zcNsarH8Ihu/UuX6XMPEogDAxkuUFeZn60pXNHAqn3A==", - "dev": true, - "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/node-abi/node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": 
"sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/node-abi/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/node-abi/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, "node_modules/node-domexception": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", @@ -4507,32 +4246,6 @@ "node": ">=8" } }, - "node_modules/prebuild-install": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz", - "integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==", - "dev": true, - "dependencies": { - "detect-libc": "^2.0.0", - "expand-template": "^2.0.3", - "github-from-package": "0.0.0", - "minimist": "^1.2.3", - "mkdirp-classic": "^0.5.3", - "napi-build-utils": "^1.0.1", - "node-abi": "^3.3.0", - "pump": "^3.0.0", - "rc": "^1.2.7", - "simple-get": "^4.0.0", - "tar-fs": "^2.0.0", - "tunnel-agent": "^0.6.0" - }, - "bin": { - "prebuild-install": "bin.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/pretty-format": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", @@ -4599,16 +4312,6 @@ "node": ">= 0.10" 
} }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dev": true, - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, "node_modules/pure-rand": { "version": "6.0.4", "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.4.tgz", @@ -4625,50 +4328,12 @@ } ] }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dev": true, - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/rc/node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/react-is": { "version": "18.2.0", "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", "dev": true }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "dev": true, - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/readdirp": { "version": "3.6.0", 
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", @@ -4776,26 +4441,6 @@ "tslib": "^2.1.0" } }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, "node_modules/selfsigned": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.1.1.tgz", @@ -4817,12 +4462,6 @@ "semver": "bin/semver.js" } }, - "node_modules/set-cookie-parser": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz", - "integrity": "sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==", - "dev": true - }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -4850,51 +4489,6 @@ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true }, - "node_modules/simple-concat": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", - "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - 
"node_modules/simple-get": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", - "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "decompress-response": "^6.0.0", - "once": "^1.3.1", - "simple-concat": "^1.0.0" - } - }, "node_modules/sisteransi": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", @@ -5028,15 +4622,6 @@ "node": ">=10.0.0" } }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dev": true, - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, "node_modules/string-length": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", @@ -5130,34 +4715,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/tar-fs": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", - "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", - "dev": true, - "dependencies": { - "chownr": "^1.1.1", - "mkdirp-classic": "^0.5.2", - "pump": "^3.0.0", - "tar-stream": "^2.1.4" - } - }, - "node_modules/tar-stream": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", - "integrity": 
"sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", - "dev": true, - "dependencies": { - "bl": "^4.0.3", - "end-of-stream": "^1.4.1", - "fs-constants": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.1.1" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/test-exclude": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", @@ -5287,18 +4844,6 @@ "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==", "dev": true }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", - "dev": true, - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, "node_modules/type-detect": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", @@ -5375,12 +4920,6 @@ "browserslist": ">= 4.21.0" } }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "dev": true - }, "node_modules/v8-to-istanbul": { "version": "9.2.0", "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz", @@ -5447,9 +4986,9 @@ } }, "node_modules/workerd": { - "version": "1.20230814.1", - "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20230814.1.tgz", - "integrity": "sha512-zJeSEteXuAD+bpYJT8WvzTAHvIAkKPVxOV+Jy6zCLKz5e08N3OUbAF+wrvGWc8b2aB1sj+IYsdXfkv4puH+qXQ==", + "version": "1.20231030.0", + "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20231030.0.tgz", + 
"integrity": "sha512-+FSW+d31f8RrjHanFf/R9A+Z0csf3OtsvzdPmAKuwuZm/5HrBv83cvG9fFeTxl7/nI6irUUXIRF9xcj/NomQzQ==", "dev": true, "hasInstallScript": true, "bin": { @@ -5459,30 +4998,32 @@ "node": ">=16" }, "optionalDependencies": { - "@cloudflare/workerd-darwin-64": "1.20230814.1", - "@cloudflare/workerd-darwin-arm64": "1.20230814.1", - "@cloudflare/workerd-linux-64": "1.20230814.1", - "@cloudflare/workerd-linux-arm64": "1.20230814.1", - "@cloudflare/workerd-windows-64": "1.20230814.1" + "@cloudflare/workerd-darwin-64": "1.20231030.0", + "@cloudflare/workerd-darwin-arm64": "1.20231030.0", + "@cloudflare/workerd-linux-64": "1.20231030.0", + "@cloudflare/workerd-linux-arm64": "1.20231030.0", + "@cloudflare/workerd-windows-64": "1.20231030.0" } }, "node_modules/wrangler": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.6.0.tgz", - "integrity": "sha512-GWs4+gIUK+086svW/TgFhhxxrl/hdW2L7WASbdc10dJT7yFmCXse0SnHiqWUxbFu3ScP2t3a3LszJ08wwolWHg==", + "version": "3.19.0", + "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.19.0.tgz", + "integrity": "sha512-pY7xWqkQn6DJ+1vz9YHz2pCftEmK+JCTj9sqnucp0NZnlUiILDmBWegsjjCLZycgfiA62J213N7NvjLPr2LB8w==", "dev": true, "dependencies": { "@cloudflare/kv-asset-handler": "^0.2.0", - "@esbuild-plugins/node-globals-polyfill": "^0.1.1", - "@esbuild-plugins/node-modules-polyfill": "^0.1.4", + "@esbuild-plugins/node-globals-polyfill": "^0.2.3", + "@esbuild-plugins/node-modules-polyfill": "^0.2.2", "blake3-wasm": "^2.1.5", "chokidar": "^3.5.3", - "esbuild": "0.16.3", - "miniflare": "3.20230814.1", + "esbuild": "0.17.19", + "miniflare": "3.20231030.3", "nanoid": "^3.3.3", "path-to-regexp": "^6.2.0", + "resolve.exports": "^2.0.2", "selfsigned": "^2.0.1", - "source-map": "^0.7.4", + "source-map": "0.6.1", + "source-map-support": "0.5.21", "xxhash-wasm": "^1.0.1" }, "bin": { @@ -5490,19 +5031,20 @@ "wrangler2": "bin/wrangler.js" }, "engines": { - "node": ">=16.13.0" + "node": 
">=16.17.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, - "node_modules/wrangler/node_modules/source-map": { - "version": "0.7.4", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", - "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "node_modules/wrangler/node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", "dev": true, - "engines": { - "node": ">= 8" + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" } }, "node_modules/wrap-ansi": { From e2d5d2bcac0aaa48c98931381a1fdc53c16c73f0 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 4 Apr 2024 07:32:06 -0400 Subject: [PATCH 052/533] fix(streaming): handle special line characters and fix multi-byte character decoding (#757) --- src/streaming.ts | 120 +++++++++++++++----- tests/streaming.test.ts | 245 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 338 insertions(+), 27 deletions(-) diff --git a/src/streaming.ts b/src/streaming.ts index 6b0f2a345..722a8f69c 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -23,29 +23,6 @@ export class Stream implements AsyncIterable { static fromSSEResponse(response: Response, controller: AbortController) { let consumed = false; - const decoder = new SSEDecoder(); - - async function* iterMessages(): AsyncGenerator { - if (!response.body) { - controller.abort(); - throw new OpenAIError(`Attempted to iterate over a response with no body`); - } - - const lineDecoder = new LineDecoder(); - - const iter = readableStreamAsyncIterable(response.body); - for await (const chunk of iter) { - for (const line of lineDecoder.decode(chunk)) { - const sse = decoder.decode(line); - 
if (sse) yield sse; - } - } - - for (const line of lineDecoder.flush()) { - const sse = decoder.decode(line); - if (sse) yield sse; - } - } async function* iterator(): AsyncIterator { if (consumed) { @@ -54,7 +31,7 @@ export class Stream implements AsyncIterable { consumed = true; let done = false; try { - for await (const sse of iterMessages()) { + for await (const sse of _iterSSEMessages(response, controller)) { if (done) continue; if (sse.data.startsWith('[DONE]')) { @@ -220,6 +197,97 @@ export class Stream implements AsyncIterable { } } +export async function* _iterSSEMessages( + response: Response, + controller: AbortController, +): AsyncGenerator { + if (!response.body) { + controller.abort(); + throw new OpenAIError(`Attempted to iterate over a response with no body`); + } + + const sseDecoder = new SSEDecoder(); + const lineDecoder = new LineDecoder(); + + const iter = readableStreamAsyncIterable(response.body); + for await (const sseChunk of iterSSEChunks(iter)) { + for (const line of lineDecoder.decode(sseChunk)) { + const sse = sseDecoder.decode(line); + if (sse) yield sse; + } + } + + for (const line of lineDecoder.flush()) { + const sse = sseDecoder.decode(line); + if (sse) yield sse; + } +} + +/** + * Given an async iterable iterator, iterates over it and yields full + * SSE chunks, i.e. yields when a double new-line is encountered. + */ +async function* iterSSEChunks(iterator: AsyncIterableIterator): AsyncGenerator { + let data = new Uint8Array(); + + for await (const chunk of iterator) { + if (chunk == null) { + continue; + } + + const binaryChunk = + chunk instanceof ArrayBuffer ? new Uint8Array(chunk) + : typeof chunk === 'string' ? 
new TextEncoder().encode(chunk) + : chunk; + + let newData = new Uint8Array(data.length + binaryChunk.length); + newData.set(data); + newData.set(binaryChunk, data.length); + data = newData; + + let patternIndex; + while ((patternIndex = findDoubleNewlineIndex(data)) !== -1) { + yield data.slice(0, patternIndex); + data = data.slice(patternIndex); + } + } + + if (data.length > 0) { + yield data; + } +} + +function findDoubleNewlineIndex(buffer: Uint8Array): number { + // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n) + // and returns the index right after the first occurrence of any pattern, + // or -1 if none of the patterns are found. + const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = 0; i < buffer.length - 2; i++) { + if (buffer[i] === newline && buffer[i + 1] === newline) { + // \n\n + return i + 2; + } + if (buffer[i] === carriage && buffer[i + 1] === carriage) { + // \r\r + return i + 2; + } + if ( + buffer[i] === carriage && + buffer[i + 1] === newline && + i + 3 < buffer.length && + buffer[i + 2] === carriage && + buffer[i + 3] === newline + ) { + // \r\n\r\n + return i + 4; + } + } + + return -1; +} + class SSEDecoder { private data: string[]; private event: string | null; @@ -283,8 +351,8 @@ class SSEDecoder { */ class LineDecoder { // prettier-ignore - static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']); - static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g; + static NEWLINE_CHARS = new Set(['\n', '\r']); + static NEWLINE_REGEXP = /\r\n|[\n\r]/g; buffer: string[]; trailingCR: boolean; diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts index 479b2a341..6fe9a5781 100644 --- a/tests/streaming.test.ts +++ b/tests/streaming.test.ts @@ -1,4 +1,7 @@ -import { _decodeChunks as decodeChunks } from 'openai/streaming'; +import { Response } from 'node-fetch'; +import { PassThrough } from 'stream'; +import assert 
from 'assert'; +import { _iterSSEMessages, _decodeChunks as decodeChunks } from 'openai/streaming'; describe('line decoder', () => { test('basic', () => { @@ -27,3 +30,243 @@ describe('line decoder', () => { expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); }); }); + +describe('streaming decoding', () => { + test('basic', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: completion\n'); + yield Buffer.from('data: {"foo":true}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(JSON.parse(event.value.data)).toEqual({ foo: true }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('data without event', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('data: {"foo":true}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toBeNull(); + expect(JSON.parse(event.value.data)).toEqual({ foo: true }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('event without data', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: foo\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('foo'); + expect(event.value.data).toEqual(''); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('multiple events', async () => { + async function* body(): AsyncGenerator { + 
yield Buffer.from('event: foo\n'); + yield Buffer.from('\n'); + yield Buffer.from('event: ping\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('foo'); + expect(event.value.data).toEqual(''); + + event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('ping'); + expect(event.value.data).toEqual(''); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('multiple events with data', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: foo\n'); + yield Buffer.from('data: {"foo":true}\n'); + yield Buffer.from('\n'); + yield Buffer.from('event: ping\n'); + yield Buffer.from('data: {"bar":false}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('foo'); + expect(JSON.parse(event.value.data)).toEqual({ foo: true }); + + event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('ping'); + expect(JSON.parse(event.value.data)).toEqual({ bar: false }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('multiple data lines with empty line', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: ping\n'); + yield Buffer.from('data: {\n'); + yield Buffer.from('data: "foo":\n'); + yield Buffer.from('data: \n'); + yield Buffer.from('data:\n'); + yield Buffer.from('data: true}\n'); + yield Buffer.from('\n\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await 
stream.next(); + assert(event.value); + expect(event.value.event).toEqual('ping'); + expect(JSON.parse(event.value.data)).toEqual({ foo: true }); + expect(event.value.data).toEqual('{\n"foo":\n\n\ntrue}'); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('data json escaped double new line', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('event: ping\n'); + yield Buffer.from('data: {"foo": "my long\\n\\ncontent"}'); + yield Buffer.from('\n\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('ping'); + expect(JSON.parse(event.value.data)).toEqual({ foo: 'my long\n\ncontent' }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('special new line characters', async () => { + async function* body(): AsyncGenerator { + yield Buffer.from('data: {"content": "culpa "}\n'); + yield Buffer.from('\n'); + yield Buffer.from('data: {"content": "'); + yield Buffer.from([0xe2, 0x80, 0xa8]); + yield Buffer.from('"}\n'); + yield Buffer.from('\n'); + yield Buffer.from('data: {"content": "foo"}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(JSON.parse(event.value.data)).toEqual({ content: 'culpa ' }); + + event = await stream.next(); + assert(event.value); + expect(JSON.parse(event.value.data)).toEqual({ content: Buffer.from([0xe2, 0x80, 0xa8]).toString() }); + + event = await stream.next(); + assert(event.value); + expect(JSON.parse(event.value.data)).toEqual({ content: 'foo' }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); + + test('multi-byte characters across chunks', async () => { 
+ async function* body(): AsyncGenerator { + yield Buffer.from('event: completion\n'); + yield Buffer.from('data: {"content": "'); + // bytes taken from the string 'известни' and arbitrarily split + // so that some multi-byte characters span multiple chunks + yield Buffer.from([0xd0]); + yield Buffer.from([0xb8, 0xd0, 0xb7, 0xd0]); + yield Buffer.from([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8]); + yield Buffer.from('"}\n'); + yield Buffer.from('\n'); + } + + const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[ + Symbol.asyncIterator + ](); + + let event = await stream.next(); + assert(event.value); + expect(event.value.event).toEqual('completion'); + expect(JSON.parse(event.value.data)).toEqual({ content: 'известни' }); + + event = await stream.next(); + expect(event.done).toBeTruthy(); + }); +}); + +async function iteratorToStream(iterator: AsyncGenerator): Promise { + const parts: unknown[] = []; + + for await (const chunk of iterator) { + parts.push(chunk); + } + + let index = 0; + + const stream = new PassThrough({ + read() { + const value = parts[index]; + if (value === undefined) { + stream.end(); + } else { + index += 1; + stream.write(value); + } + }, + }); + + return stream; +} From 4999e9b691965c31c8979c5ce32fdb75c577dcf9 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 4 Apr 2024 07:32:26 -0400 Subject: [PATCH 053/533] release: 4.32.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 27308d159..d6b720422 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.32.1" + ".": "4.32.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 
a1702ad3b..22748a5bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.32.2 (2024-04-04) + +Full Changelog: [v4.32.1...v4.32.2](https://github.com/openai/openai-node/compare/v4.32.1...v4.32.2) + +### Bug Fixes + +* **streaming:** handle special line characters and fix multi-byte character decoding ([#757](https://github.com/openai/openai-node/issues/757)) ([8dcdda2](https://github.com/openai/openai-node/commit/8dcdda2b0d1d86486eea5fd47d24a8d26fde4c19)) +* **tests:** update wrangler to v3.19.0 (CVE-2023-7080) ([#755](https://github.com/openai/openai-node/issues/755)) ([47ca41d](https://github.com/openai/openai-node/commit/47ca41da9a739b2e04b721cb1fe843e5dd152465)) + + +### Chores + +* **tests:** bump ecosystem tests dependencies ([#753](https://github.com/openai/openai-node/issues/753)) ([3f86ea2](https://github.com/openai/openai-node/commit/3f86ea2205c90e05bcbe582491a4bed01075a5b1)) + ## 4.32.1 (2024-04-02) Full Changelog: [v4.32.0...v4.32.1](https://github.com/openai/openai-node/compare/v4.32.0...v4.32.1) diff --git a/README.md b/README.md index aae0367b6..ba4b69838 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.32.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.32.2/mod.ts'; ``` diff --git a/build-deno b/build-deno index a56b6af13..8d0ee6da9 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.32.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.32.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 4d87ed952..3d0107223 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.32.1", + "version": "4.32.2", "description": "The official TypeScript 
library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c2e5453c3..ecc4c1a71 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.32.1'; // x-release-please-version +export const VERSION = '4.32.2'; // x-release-please-version From 4f38d4df907fe99f3757da6b58b422b4e663e67c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 5 Apr 2024 08:36:22 -0400 Subject: [PATCH 054/533] feat(api): add additional messages when creating thread run (#759) --- src/resources/beta/threads/runs/runs.ts | 158 ++++++++++++++++++ .../beta/threads/runs/runs.test.ts | 5 + 2 files changed, 163 insertions(+) diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 5dfc7d595..04234a74f 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -529,6 +529,11 @@ export interface RunCreateParamsBase { */ additional_instructions?: string | null; + /** + * Adds additional messages to the thread before creating the run. + */ + additional_messages?: Array | null; + /** * Overrides the * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) @@ -574,6 +579,39 @@ export interface RunCreateParamsBase { } export namespace RunCreateParams { + export interface AdditionalMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the entity that is creating the message. Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. 
+ */ + role: 'user' | 'assistant'; + + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + * the message should use. There can be a maximum of 10 files attached to a + * message. Useful for tools like `retrieval` and `code_interpreter` that can + * access and use files. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + } + export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; } @@ -637,6 +675,11 @@ export interface RunCreateAndPollParams { */ additional_instructions?: string | null; + /** + * Adds additional messages to the thread before creating the run. + */ + additional_messages?: Array | null; + /** * Overrides the * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) @@ -674,6 +717,41 @@ export interface RunCreateAndPollParams { tools?: Array | null; } +export namespace RunCreateAndPollParams { + export interface AdditionalMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the entity that is creating the message. Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. + */ + role: 'user' | 'assistant'; + + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + * the message should use. There can be a maximum of 10 files attached to a + * message. 
Useful for tools like `retrieval` and `code_interpreter` that can + * access and use files. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + } +} + export interface RunCreateAndStreamParams { /** * The ID of the @@ -689,6 +767,11 @@ export interface RunCreateAndStreamParams { */ additional_instructions?: string | null; + /** + * Adds additional messages to the thread before creating the run. + */ + additional_messages?: Array | null; + /** * Overrides the * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) @@ -726,6 +809,41 @@ export interface RunCreateAndStreamParams { tools?: Array | null; } +export namespace RunCreateAndStreamParams { + export interface AdditionalMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the entity that is creating the message. Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. + */ + role: 'user' | 'assistant'; + + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + * the message should use. There can be a maximum of 10 files attached to a + * message. Useful for tools like `retrieval` and `code_interpreter` that can + * access and use files. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. 
Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + } +} + export interface RunStreamParams { /** * The ID of the @@ -741,6 +859,11 @@ export interface RunStreamParams { */ additional_instructions?: string | null; + /** + * Adds additional messages to the thread before creating the run. + */ + additional_messages?: Array | null; + /** * Overrides the * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) @@ -778,6 +901,41 @@ export interface RunStreamParams { tools?: Array | null; } +export namespace RunStreamParams { + export interface AdditionalMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the entity that is creating the message. Allowed values include: + * + * - `user`: Indicates the message is sent by an actual user and should be used in + * most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this + * value to insert messages from the assistant into the conversation. + */ + role: 'user' | 'assistant'; + + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + * the message should use. There can be a maximum of 10 files attached to a + * message. Useful for tools like `retrieval` and `code_interpreter` that can + * access and use files. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. 
+ */ + metadata?: unknown | null; + } +} + export type RunSubmitToolOutputsParams = | RunSubmitToolOutputsParamsNonStreaming | RunSubmitToolOutputsParamsStreaming; diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 5f17c1b58..2911cfd53 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -24,6 +24,11 @@ describe('resource runs', () => { const response = await openai.beta.threads.runs.create('string', { assistant_id: 'string', additional_instructions: 'string', + additional_messages: [ + { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, + { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, + { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, + ], instructions: 'string', metadata: {}, model: 'string', From 018ac718ccf6a96798ef8f91906b3b652aa50919 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 5 Apr 2024 08:36:43 -0400 Subject: [PATCH 055/533] release: 4.33.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d6b720422..e5b450ff3 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.32.2" + ".": "4.33.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 22748a5bd..f865d94f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.33.0 (2024-04-05) + +Full Changelog: [v4.32.2...v4.33.0](https://github.com/openai/openai-node/compare/v4.32.2...v4.33.0) + +### Features + +* **api:** add additional messages when creating thread run ([#759](https://github.com/openai/openai-node/issues/759)) 
([f1fdb41](https://github.com/openai/openai-node/commit/f1fdb410e087f9b94faeda0558de573ec1118601)) + ## 4.32.2 (2024-04-04) Full Changelog: [v4.32.1...v4.32.2](https://github.com/openai/openai-node/compare/v4.32.1...v4.32.2) diff --git a/README.md b/README.md index ba4b69838..62c8967c6 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.32.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.33.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index 8d0ee6da9..bbe96faae 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.32.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.33.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 3d0107223..490a9e492 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.32.2", + "version": "4.33.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index ecc4c1a71..6726dc21c 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.32.2'; // x-release-please-version +export const VERSION = '4.33.0'; // x-release-please-version From fcf748dbbd23f972ff9fd81a8b2a35232a2d6e5c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 11 Apr 2024 12:12:38 -0400 Subject: [PATCH 056/533] chore(internal): improve ecosystem tests (#761) --- .gitignore | 5 + .prettierignore | 2 +- ecosystem-tests/cli.ts | 226 +++++++++++++++++++++++---- ecosystem-tests/deno/deno.jsonc | 4 + ecosystem-tests/deno/deno.lock | 32 ++-- ecosystem-tests/deno/import_map.json | 6 - 
ecosystem-tests/deno/main_test.ts | 5 +- 7 files changed, 229 insertions(+), 51 deletions(-) delete mode 100644 ecosystem-tests/deno/import_map.json diff --git a/.gitignore b/.gitignore index 58b3944a1..31b12ac63 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,8 @@ dist /deno /*.tgz .idea/ +tmp +.pack +ecosystem-tests/deno/package.json +ecosystem-tests/*/openai.tgz + diff --git a/.prettierignore b/.prettierignore index fc6160fb1..3548c5af9 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,5 +1,5 @@ CHANGELOG.md -/ecosystem-tests +/ecosystem-tests/*/** /node_modules /deno diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index c84c479d4..a3c1f27a4 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -5,16 +5,19 @@ import assert from 'assert'; import path from 'path'; const TAR_NAME = 'openai.tgz'; -const PACK_FILE = `.pack/${TAR_NAME}`; +const PACK_FOLDER = '.pack'; +const PACK_FILE = `${PACK_FOLDER}/${TAR_NAME}`; const IS_CI = Boolean(process.env['CI'] && process.env['CI'] !== 'false'); async function defaultNodeRunner() { await installPackage(); await run('npm', ['run', 'tsc']); - if (state.live) await run('npm', ['test']); + if (state.live) { + await run('npm', ['test']); + } } -const projects = { +const projectRunners = { 'node-ts-cjs': defaultNodeRunner, 'node-ts-cjs-web': defaultNodeRunner, 'node-ts-cjs-auto': defaultNodeRunner, @@ -76,30 +79,17 @@ const projects = { } }, deno: async () => { + // we don't need to explicitly install the package here + // because our deno setup relies on `rootDir/deno` to exist + // which is an artifact produced from our build process await run('deno', ['task', 'install']); - await installPackage(); - const packFile = getPackFile(); - - const openaiDir = path.resolve( - process.cwd(), - 'node_modules', - '.deno', - 'openai@3.3.0', - 'node_modules', - 'openai', - ); - - await run('sh', ['-c', 'rm -rf *'], { cwd: openaiDir, stdio: 'inherit' }); - await run('tar', ['xzf', 
path.resolve(packFile)], { cwd: openaiDir, stdio: 'inherit' }); - await run('sh', ['-c', 'mv package/* .'], { cwd: openaiDir, stdio: 'inherit' }); - await run('sh', ['-c', 'rm -rf package'], { cwd: openaiDir, stdio: 'inherit' }); - await run('deno', ['task', 'check']); + if (state.live) await run('deno', ['task', 'test']); }, }; -const projectNames = Object.keys(projects) as Array; +let projectNames = Object.keys(projectRunners) as Array; const projectNamesSet = new Set(projectNames); function parseArgs() { @@ -118,6 +108,11 @@ function parseArgs() { type: 'boolean', default: false, }, + skip: { + type: 'array', + default: [], + description: 'Skip one or more projects. Separate project names with a space.', + }, skipPack: { type: 'boolean', default: false, @@ -156,6 +151,10 @@ function parseArgs() { default: false, description: 'run all projects in parallel (jobs = # projects)', }, + noCleanup: { + type: 'boolean', + default: false, + }, }) .help().argv; } @@ -165,9 +164,32 @@ type Args = Awaited>; let state: Args & { rootDir: string }; async function main() { + if (!process.env['OPENAI_API_KEY']) { + console.error(`Error: The environment variable OPENAI_API_KEY must be set. Run the command + $echo 'OPENAI_API_KEY = "'"\${OPENAI_API_KEY}"'"' >> ecosystem-tests/cloudflare-worker/wrangler.toml`); + process.exit(0); + } + const args = (await parseArgs()) as Args; console.error(`args:`, args); + // Some projects, e.g. Deno can be slow to run, so offer the option to skip them. 
Example: + // --skip=deno node-ts-cjs + if (args.skip.length > 0) { + args.skip.forEach((projectName, idx) => { + // Ensure the inputted project name is lower case + args.skip[idx] = (projectName + '').toLowerCase(); + }); + + projectNames = projectNames.filter((projectName) => (args.skip as string[]).indexOf(projectName) < 0); + + args.skip.forEach((projectName) => { + projectNamesSet.delete(projectName as any); + }); + } + + const tmpFolderPath = path.resolve(process.cwd(), 'tmp'); + const rootDir = await packageDir(); console.error(`rootDir:`, rootDir); @@ -191,8 +213,63 @@ async function main() { const failed: typeof projectNames = []; + let cleanupWasRun = false; + + // Cleanup the various artifacts created as part of executing this script + async function runCleanup() { + if (cleanupWasRun) { + return; + } + cleanupWasRun = true; + + // Restore the original files in the ecosystem-tests folders from before + // npm install was run + await fileCache.restoreFiles(tmpFolderPath); + + const packFolderPath = path.join(process.cwd(), PACK_FOLDER); + + try { + // Clean up the .pack folder if this was the process that created it. + await fs.unlink(PACK_FILE); + await fs.rmdir(packFolderPath); + } catch (err) { + console.log('Failed to delete .pack folder', err); + } + + for (let i = 0; i < projectNames.length; i++) { + const projectName = (projectNames as any)[i] as string; + + await defaultNodeCleanup(projectName).catch((err: any) => { + console.error('Error: Cleanup of file artifacts failed for project', projectName, err); + }); + } + } + + async function runCleanupAndExit() { + await runCleanup(); + + process.exit(1); + } + + if (!(await fileExists(tmpFolderPath))) { + await fs.mkdir(tmpFolderPath); + } + let { jobs } = args; - if (args.parallel) jobs = projectsToRun.length; + if (args.parallel) { + jobs = projectsToRun.length; + } + + if (!args.noCleanup) { + // The cleanup code is only executed from the parent script that runs + // multiple projects. 
+ process.on('SIGINT', runCleanupAndExit); + process.on('SIGTERM', runCleanupAndExit); + process.on('exit', runCleanup); + + await fileCache.cacheFiles(tmpFolderPath); + } + if (jobs > 1) { const queue = [...projectsToRun]; const runningProjects = new Set(); @@ -225,7 +302,9 @@ async function main() { [...Array(jobs).keys()].map(async () => { while (queue.length) { const project = queue.shift(); - if (!project) break; + if (!project) { + break; + } // preserve interleaved ordering of writes to stdout/stderr const chunks: { dest: 'stdout' | 'stderr'; data: string | Buffer }[] = []; @@ -238,6 +317,7 @@ async function main() { __filename, project, '--skip-pack', + '--noCleanup', `--retry=${args.retry}`, ...(args.live ? ['--live'] : []), ...(args.verbose ? ['--verbose'] : []), @@ -248,6 +328,7 @@ async function main() { ); child.stdout?.on('data', (data) => chunks.push({ dest: 'stdout', data })); child.stderr?.on('data', (data) => chunks.push({ dest: 'stderr', data })); + await child; } catch (error) { failed.push(project); @@ -255,7 +336,10 @@ async function main() { runningProjects.delete(project); } - if (IS_CI) console.log(`::group::${failed.includes(project) ? '❌' : '✅'} ${project}`); + if (IS_CI) { + console.log(`::group::${failed.includes(project) ? 
'❌' : '✅'} ${project}`); + } + for (const { data } of chunks) { process.stdout.write(data); } @@ -268,7 +352,7 @@ async function main() { clearProgress(); } else { for (const project of projectsToRun) { - const fn = projects[project]; + const fn = projectRunners[project]; await withChdir(path.join(rootDir, 'ecosystem-tests', project), async () => { console.error('\n'); @@ -294,6 +378,10 @@ async function main() { } } + if (!args.noCleanup) { + await runCleanup(); + } + if (failed.length) { console.error(`${failed.length} project(s) failed - ${failed.join(', ')}`); process.exit(1); @@ -340,10 +428,15 @@ async function buildPackage() { return; } - if (!(await pathExists('.pack'))) { - await fs.mkdir('.pack'); + if (!(await pathExists(PACK_FOLDER))) { + await fs.mkdir(PACK_FOLDER); } + // Run our build script to ensure all of our build artifacts are up to date. + // This matters the most for deno as it directly relies on build artifacts + // instead of the pack file + await run('yarn', ['build']); + const proc = await run('npm', ['pack', '--ignore-scripts', '--json'], { cwd: path.join(process.cwd(), 'dist'), alwaysPipe: true, @@ -366,6 +459,11 @@ async function installPackage() { return; } + try { + // Ensure that there is a clean node_modules folder. + await run('rm', ['-rf', `./node_modules`]); + } catch (err) {} + const packFile = getPackFile(); await fs.copyFile(packFile, `./${TAR_NAME}`); return await run('npm', ['install', '-D', `./${TAR_NAME}`]); @@ -440,6 +538,80 @@ export const packageDir = async (): Promise => { throw new Error('Package directory not found'); }; +// Caches files that are modified by this script, e.g. 
package.json, +// so that they can be restored when the script either finishes or is +// terminated +const fileCache = (() => { + const filesToCache: Array = ['package.json', 'package-lock.json', 'deno.lock', 'bun.lockb']; + + return { + // Copy existing files from each ecosystem-tests project folder to the ./tmp folder + cacheFiles: async (tmpFolderPath: string) => { + for (let i = 0; i < projectNames.length; i++) { + const projectName = (projectNames as any)[i] as string; + const projectPath = path.resolve(process.cwd(), 'ecosystem-tests', projectName); + + for (let j = 0; j < filesToCache.length; j++) { + const fileName = filesToCache[j] || ''; + + const filePath = path.resolve(projectPath, fileName); + if (await fileExists(filePath)) { + const tmpProjectPath = path.resolve(tmpFolderPath, projectName); + + if (!(await fileExists(tmpProjectPath))) { + await fs.mkdir(tmpProjectPath); + } + await fs.copyFile(filePath, path.resolve(tmpProjectPath, fileName)); + } + } + } + }, + + // Restore the original files to each ecosystem-tests project folder from the ./tmp folder + restoreFiles: async (tmpFolderPath: string) => { + for (let i = 0; i < projectNames.length; i++) { + const projectName = (projectNames as any)[i] as string; + + const projectPath = path.resolve(process.cwd(), 'ecosystem-tests', projectName); + const tmpProjectPath = path.resolve(tmpFolderPath, projectName); + + for (let j = 0; j < filesToCache.length; j++) { + const fileName = filesToCache[j] || ''; + + const filePath = path.resolve(tmpProjectPath, fileName); + if (await fileExists(filePath)) { + await fs.rename(filePath, path.resolve(projectPath, fileName)); + } + } + await fs.rmdir(tmpProjectPath); + } + }, + }; +})(); + +async function defaultNodeCleanup(projectName: string) { + try { + const projectPath = path.resolve(process.cwd(), 'ecosystem-tests', projectName); + + const packFilePath = path.resolve(projectPath, TAR_NAME); + + if (await fileExists(packFilePath)) { + await 
fs.unlink(packFilePath); + } + } catch (err) { + console.error('Cleanup failed for project', projectName, err); + } +} + +async function fileExists(filePath: string) { + try { + await fs.stat(filePath); + return true; + } catch { + return false; + } +} + main().catch((err) => { console.error(err); process.exit(1); diff --git a/ecosystem-tests/deno/deno.jsonc b/ecosystem-tests/deno/deno.jsonc index ba78e9d30..7de05f2ba 100644 --- a/ecosystem-tests/deno/deno.jsonc +++ b/ecosystem-tests/deno/deno.jsonc @@ -3,5 +3,9 @@ "install": "deno install --node-modules-dir main_test.ts -f", "check": "deno lint && deno check main_test.ts", "test": "deno test --allow-env --allow-net --allow-read --node-modules-dir" + }, + "imports": { + "openai": "../../deno/mod.ts", + "openai/": "../../deno/" } } diff --git a/ecosystem-tests/deno/deno.lock b/ecosystem-tests/deno/deno.lock index 17a25fcbc..aa22a1427 100644 --- a/ecosystem-tests/deno/deno.lock +++ b/ecosystem-tests/deno/deno.lock @@ -1,20 +1,14 @@ { - "version": "2", - "remote": { - "https://deno.land/std@0.192.0/fmt/colors.ts": "d67e3cd9f472535241a8e410d33423980bec45047e343577554d3356e1f0ef4e", - "https://deno.land/std@0.192.0/testing/_diff.ts": "1a3c044aedf77647d6cac86b798c6417603361b66b54c53331b312caeb447aea", - "https://deno.land/std@0.192.0/testing/_format.ts": "a69126e8a469009adf4cf2a50af889aca364c349797e63174884a52ff75cf4c7", - "https://deno.land/std@0.192.0/testing/asserts.ts": "e16d98b4d73ffc4ed498d717307a12500ae4f2cbe668f1a215632d19fcffc22f" - }, - "npm": { + "version": "3", + "packages": { "specifiers": { - "@types/node@^20.3.1": "@types/node@20.3.1", - "node-fetch@^3.0.0": "node-fetch@3.3.1", - "openai": "openai@3.3.0", - "ts-node@^10.9.1": "ts-node@10.9.1_@types+node@20.3.1_typescript@5.1.3", - "typescript@^5.1.3": "typescript@5.1.3" + "npm:@types/node@^20.3.1": "npm:@types/node@20.3.1", + "npm:node-fetch@^3.0.0": "npm:node-fetch@3.3.1", + "npm:openai": "npm:openai@3.3.0", + 
"npm:ts-node@^10.9.1": "npm:ts-node@10.9.1_@types+node@20.3.1_typescript@5.1.3", + "npm:typescript@^5.1.3": "npm:typescript@5.1.3" }, - "packages": { + "npm": { "@cspotcode/source-map-support@0.8.1": { "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", "dependencies": { @@ -195,5 +189,15 @@ "dependencies": {} } } + }, + "redirects": { + "https://deno.land/x/fastest_levenshtein/mod.ts": "https://deno.land/x/fastest_levenshtein@1.0.10/mod.ts" + }, + "remote": { + "https://deno.land/std@0.192.0/fmt/colors.ts": "d67e3cd9f472535241a8e410d33423980bec45047e343577554d3356e1f0ef4e", + "https://deno.land/std@0.192.0/testing/_diff.ts": "1a3c044aedf77647d6cac86b798c6417603361b66b54c53331b312caeb447aea", + "https://deno.land/std@0.192.0/testing/_format.ts": "a69126e8a469009adf4cf2a50af889aca364c349797e63174884a52ff75cf4c7", + "https://deno.land/std@0.192.0/testing/asserts.ts": "e16d98b4d73ffc4ed498d717307a12500ae4f2cbe668f1a215632d19fcffc22f", + "https://deno.land/x/fastest_levenshtein@1.0.10/mod.ts": "aea49d54b6bb37082b2377da2ea068331da07b2a515621d8eff97538b7157b40" } } diff --git a/ecosystem-tests/deno/import_map.json b/ecosystem-tests/deno/import_map.json deleted file mode 100644 index 941f5396b..000000000 --- a/ecosystem-tests/deno/import_map.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "imports": { - "/": "./", - "./": "./" - } -} diff --git a/ecosystem-tests/deno/main_test.ts b/ecosystem-tests/deno/main_test.ts index b841b4053..b27c9079b 100644 --- a/ecosystem-tests/deno/main_test.ts +++ b/ecosystem-tests/deno/main_test.ts @@ -1,7 +1,6 @@ import { assertEquals, AssertionError } from 'https://deno.land/std@0.192.0/testing/asserts.ts'; -import OpenAI, { toFile } from 'npm:openai@3.3.0'; import { distance } from 'https://deno.land/x/fastest_levenshtein/mod.ts'; -import { ChatCompletion } from 'npm:openai@3.3.0/resources/chat/completions'; 
+import OpenAI, { toFile } from 'openai'; const url = 'https://audio-samples.github.io/samples/mp3/blizzard_biased/sample-1.mp3'; const filename = 'sample-1.mp3'; @@ -66,7 +65,7 @@ Deno.test(async function rawResponse() { offset += chunk.length; } - const json: ChatCompletion = JSON.parse(new TextDecoder().decode(merged)); + const json: OpenAI.ChatCompletion = JSON.parse(new TextDecoder().decode(merged)); assertSimilar(json.choices[0]?.message.content || '', 'This is a test', 10); }); From b6acf54baab7e6cbf6ce3ad1d6c70197cc0181d0 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:52:04 -0400 Subject: [PATCH 057/533] chore(internal): formatting (#763) --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f51c7a308..d6c83025f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,3 +28,5 @@ jobs: - name: Check types run: | yarn build + + From a22c6f3e7ffc2367c71cdec106b9803dd26b6397 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:52:29 -0400 Subject: [PATCH 058/533] release: 4.33.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e5b450ff3..bd6b3284c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.33.0" + ".": "4.33.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f865d94f7..f3067e694 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.33.1 (2024-04-12) + +Full Changelog: [v4.33.0...v4.33.1](https://github.com/openai/openai-node/compare/v4.33.0...v4.33.1) + +### Chores + +* **internal:** formatting 
([#763](https://github.com/openai/openai-node/issues/763)) ([b6acf54](https://github.com/openai/openai-node/commit/b6acf54baab7e6cbf6ce3ad1d6c70197cc0181d0)) +* **internal:** improve ecosystem tests ([#761](https://github.com/openai/openai-node/issues/761)) ([fcf748d](https://github.com/openai/openai-node/commit/fcf748dbbd23f972ff9fd81a8b2a35232a2d6e5c)) + ## 4.33.0 (2024-04-05) Full Changelog: [v4.32.2...v4.33.0](https://github.com/openai/openai-node/compare/v4.32.2...v4.33.0) diff --git a/README.md b/README.md index 62c8967c6..2d1ae6089 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.33.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.33.1/mod.ts'; ``` diff --git a/build-deno b/build-deno index bbe96faae..c06cd3bcf 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.33.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.33.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 490a9e492..998b6a2c7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.33.0", + "version": "4.33.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 6726dc21c..0d8f2ffd7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.33.0'; // x-release-please-version +export const VERSION = '4.33.1'; // x-release-please-version From 01f01881c457fa6bebf8ac923941c6628037b9ac Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 15 Apr 2024 15:04:29 -0400 Subject: [PATCH 059/533] feat(api): updates (#766) --- 
.stats.yml | 2 +- api.md | 32 +- src/resources/beta/assistants/assistants.ts | 27 +- src/resources/beta/beta.ts | 5 + src/resources/beta/index.ts | 9 +- src/resources/beta/threads/index.ts | 29 +- src/resources/beta/threads/runs/runs.ts | 421 +++++++++++++++++- src/resources/beta/threads/threads.ts | 323 +++++++++++++- src/resources/chat/completions.ts | 5 +- src/resources/fine-tuning/fine-tuning.ts | 5 +- src/resources/fine-tuning/index.ts | 5 +- src/resources/fine-tuning/jobs/checkpoints.ts | 108 +++++ src/resources/fine-tuning/jobs/index.ts | 21 + src/resources/fine-tuning/{ => jobs}/jobs.ts | 135 +++++- .../beta/assistants/assistants.test.ts | 4 +- .../beta/threads/runs/runs.test.ts | 7 +- .../beta/threads/threads.test.ts | 7 +- tests/api-resources/chat/completions.test.ts | 4 +- .../fine-tuning/jobs/checkpoints.test.ts | 42 ++ .../fine-tuning/{ => jobs}/jobs.test.ts | 30 ++ 20 files changed, 1177 insertions(+), 44 deletions(-) create mode 100644 src/resources/fine-tuning/jobs/checkpoints.ts create mode 100644 src/resources/fine-tuning/jobs/index.ts rename src/resources/fine-tuning/{ => jobs}/jobs.ts (66%) create mode 100644 tests/api-resources/fine-tuning/jobs/checkpoints.test.ts rename tests/api-resources/fine-tuning/{ => jobs}/jobs.test.ts (87%) diff --git a/.stats.yml b/.stats.yml index c550abf3c..284caebf4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1 +1 @@ -configured_endpoints: 51 +configured_endpoints: 52 diff --git a/api.md b/api.md index 2f82dd17b..c6a2bf273 100644 --- a/api.md +++ b/api.md @@ -149,16 +149,29 @@ Methods: Types: -- FineTuningJob -- FineTuningJobEvent +- FineTuningJob +- FineTuningJobEvent +- FineTuningJobIntegration +- FineTuningJobWandbIntegration +- FineTuningJobWandbIntegrationObject Methods: -- client.fineTuning.jobs.create({ ...params }) -> FineTuningJob -- client.fineTuning.jobs.retrieve(fineTuningJobId) -> FineTuningJob -- client.fineTuning.jobs.list({ ...params }) -> FineTuningJobsPage -- 
client.fineTuning.jobs.cancel(fineTuningJobId) -> FineTuningJob -- client.fineTuning.jobs.listEvents(fineTuningJobId, { ...params }) -> FineTuningJobEventsPage +- client.fineTuning.jobs.create({ ...params }) -> FineTuningJob +- client.fineTuning.jobs.retrieve(fineTuningJobId) -> FineTuningJob +- client.fineTuning.jobs.list({ ...params }) -> FineTuningJobsPage +- client.fineTuning.jobs.cancel(fineTuningJobId) -> FineTuningJob +- client.fineTuning.jobs.listEvents(fineTuningJobId, { ...params }) -> FineTuningJobEventsPage + +### Checkpoints + +Types: + +- FineTuningJobCheckpoint + +Methods: + +- client.fineTuning.jobs.checkpoints.list(fineTuningJobId, { ...params }) -> FineTuningJobCheckpointsPage # Beta @@ -214,6 +227,11 @@ Methods: Types: +- AssistantResponseFormat +- AssistantResponseFormatOption +- AssistantToolChoice +- AssistantToolChoiceFunction +- AssistantToolChoiceOption - Thread - ThreadDeleted diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants/assistants.ts index 1e8ca6ee9..fc9afe2ae 100644 --- a/src/resources/beta/assistants/assistants.ts +++ b/src/resources/beta/assistants/assistants.ts @@ -113,7 +113,7 @@ export interface Assistant { file_ids: Array; /** - * The system instructions that the assistant uses. The maximum length is 32768 + * The system instructions that the assistant uses. The maximum length is 256,000 * characters. */ instructions: string | null; @@ -930,7 +930,26 @@ export interface AssistantCreateParams { * [Model overview](https://platform.openai.com/docs/models/overview) for * descriptions of them. 
*/ - model: string; + model: + | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; /** * The description of the assistant. The maximum length is 512 characters. @@ -945,7 +964,7 @@ export interface AssistantCreateParams { file_ids?: Array; /** - * The system instructions that the assistant uses. The maximum length is 32768 + * The system instructions that the assistant uses. The maximum length is 256,000 * characters. */ instructions?: string | null; @@ -986,7 +1005,7 @@ export interface AssistantUpdateParams { file_ids?: Array; /** - * The system instructions that the assistant uses. The maximum length is 32768 + * The system instructions that the assistant uses. The maximum length is 256,000 * characters. 
*/ instructions?: string | null; diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 7d4457319..8f8148f9b 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -30,6 +30,11 @@ export namespace Beta { export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; export import AssistantListParams = AssistantsAPI.AssistantListParams; export import Threads = ThreadsAPI.Threads; + export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat; + export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; + export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; + export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; + export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption; export import Thread = ThreadsAPI.Thread; export import ThreadDeleted = ThreadsAPI.ThreadDeleted; export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index e43ff7315..54407edb3 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -18,9 +18,12 @@ export { AssistantsPage, Assistants, } from './assistants/index'; -export { Beta } from './beta'; -export { Chat } from './chat/index'; export { + AssistantResponseFormat, + AssistantResponseFormatOption, + AssistantToolChoice, + AssistantToolChoiceFunction, + AssistantToolChoiceOption, Thread, ThreadDeleted, ThreadCreateParams, @@ -32,3 +35,5 @@ export { ThreadCreateAndRunStreamParams, Threads, } from './threads/index'; +export { Beta } from './beta'; +export { Chat } from './chat/index'; diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index ac2f9a4fa..5f41766a9 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -27,6 +27,23 @@ export { MessagesPage, Messages, } from './messages/index'; +export { + 
AssistantResponseFormat, + AssistantResponseFormatOption, + AssistantToolChoice, + AssistantToolChoiceFunction, + AssistantToolChoiceOption, + Thread, + ThreadDeleted, + ThreadCreateParams, + ThreadUpdateParams, + ThreadCreateAndRunParams, + ThreadCreateAndRunParamsNonStreaming, + ThreadCreateAndRunParamsStreaming, + ThreadCreateAndRunPollParams, + ThreadCreateAndRunStreamParams, + Threads, +} from './threads'; export { RequiredActionFunctionToolCall, Run, @@ -47,15 +64,3 @@ export { RunsPage, Runs, } from './runs/index'; -export { - Thread, - ThreadDeleted, - ThreadCreateParams, - ThreadUpdateParams, - ThreadCreateAndRunParams, - ThreadCreateAndRunParamsNonStreaming, - ThreadCreateAndRunParamsStreaming, - ThreadCreateAndRunPollParams, - ThreadCreateAndRunStreamParams, - Threads, -} from './threads'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 04234a74f..4cfa6c36e 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -9,6 +9,7 @@ import { sleep } from 'openai/core'; import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream'; import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; +import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; import { CursorPage, type CursorPageParams } from 'openai/pagination'; import { Stream } from 'openai/streaming'; @@ -356,6 +357,12 @@ export interface Run { */ file_ids: Array; + /** + * Details on why the run is incomplete. Will be `null` if the run is not + * incomplete. 
+ */ + incomplete_details: Run.IncompleteDetails | null; + /** * The instructions that the * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for @@ -368,6 +375,18 @@ export interface Run { */ last_error: Run.LastError | null; + /** + * The maximum number of completion tokens specified to have been used over the + * course of the run. + */ + max_completion_tokens: number | null; + + /** + * The maximum number of prompt tokens specified to have been used over the course + * of the run. + */ + max_prompt_tokens: number | null; + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful * for storing additional information about the object in a structured format. Keys @@ -394,6 +413,24 @@ export interface Run { */ required_action: Run.RequiredAction | null; + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format: ThreadsAPI.AssistantResponseFormatOption | null; + /** * The Unix timestamp (in seconds) for when the run was started. */ @@ -412,6 +449,16 @@ export interface Run { */ thread_id: string; + /** + * Controls which (if any) tool is called by the model. 
`none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. + * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ + tool_choice: ThreadsAPI.AssistantToolChoiceOption | null; + /** * The list of tools that the * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for @@ -419,6 +466,8 @@ export interface Run { */ tools: Array; + truncation_strategy: Run.TruncationStrategy | null; + /** * Usage statistics related to the run. This value will be `null` if the run is not * in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -432,6 +481,18 @@ export interface Run { } export namespace Run { + /** + * Details on why the run is incomplete. Will be `null` if the run is not + * incomplete. + */ + export interface IncompleteDetails { + /** + * The reason why the run is incomplete. This will point to which specific token + * limit was reached over the course of the run. + */ + reason?: 'max_completion_tokens' | 'max_prompt_tokens'; + } + /** * The last error associated with this run. Will be `null` if there are no errors. */ @@ -475,6 +536,22 @@ export namespace Run { } } + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } + /** * Usage statistics related to the run. 
This value will be `null` if the run is not * in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -541,6 +618,24 @@ export interface RunCreateParamsBase { */ instructions?: string | null; + /** + * The maximum number of completion tokens that may be used over the course of the + * run. The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_completion_tokens?: number | null; + + /** + * The maximum number of prompt tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of prompt tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * prompt tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_prompt_tokens?: number | null; + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful * for storing additional information about the object in a structured format. Keys @@ -555,7 +650,45 @@ export interface RunCreateParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: string | null; + model?: + | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613' + | null; + + /** + * Specifies the format that the model must output. 
Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: ThreadsAPI.AssistantResponseFormatOption | null; /** * If `true`, returns a stream of events that happen during the Run as server-sent @@ -571,11 +704,23 @@ export interface RunCreateParamsBase { */ temperature?: number | null; + /** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. + * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ + tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. 
*/ tools?: Array | null; + + truncation_strategy?: RunCreateParams.TruncationStrategy | null; } export namespace RunCreateParams { @@ -612,6 +757,22 @@ export namespace RunCreateParams { metadata?: unknown | null; } + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } + export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; } @@ -687,6 +848,24 @@ export interface RunCreateAndPollParams { */ instructions?: string | null; + /** + * The maximum number of completion tokens that may be used over the course of the + * run. The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_completion_tokens?: number | null; + + /** + * The maximum number of prompt tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of prompt tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * prompt tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_prompt_tokens?: number | null; + /** * Set of 16 key-value pairs that can be attached to an object. 
This can be useful * for storing additional information about the object in a structured format. Keys @@ -701,7 +880,45 @@ export interface RunCreateAndPollParams { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: string | null; + model?: + | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613' + | null; + + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: ThreadsAPI.AssistantResponseFormatOption | null; /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -710,11 +927,23 @@ export interface RunCreateAndPollParams { */ temperature?: number | null; + /** + * Controls which (if any) tool is called by the model. 
`none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. + * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ + tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. */ tools?: Array | null; + + truncation_strategy?: RunCreateAndPollParams.TruncationStrategy | null; } export namespace RunCreateAndPollParams { @@ -750,6 +979,22 @@ export namespace RunCreateAndPollParams { */ metadata?: unknown | null; } + + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } } export interface RunCreateAndStreamParams { @@ -779,6 +1024,24 @@ export interface RunCreateAndStreamParams { */ instructions?: string | null; + /** + * The maximum number of completion tokens that may be used over the course of the + * run. The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. 
+ */ + max_completion_tokens?: number | null; + + /** + * The maximum number of prompt tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of prompt tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * prompt tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_prompt_tokens?: number | null; + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful * for storing additional information about the object in a structured format. Keys @@ -793,7 +1056,45 @@ export interface RunCreateAndStreamParams { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: string | null; + model?: + | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613' + | null; + + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. 
Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: ThreadsAPI.AssistantResponseFormatOption | null; /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -802,11 +1103,23 @@ export interface RunCreateAndStreamParams { */ temperature?: number | null; + /** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. + * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ + tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. */ tools?: Array | null; + + truncation_strategy?: RunCreateAndStreamParams.TruncationStrategy | null; } export namespace RunCreateAndStreamParams { @@ -842,6 +1155,22 @@ export namespace RunCreateAndStreamParams { */ metadata?: unknown | null; } + + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. 
+ */ + last_messages?: number | null; + } } export interface RunStreamParams { @@ -871,6 +1200,24 @@ export interface RunStreamParams { */ instructions?: string | null; + /** + * The maximum number of completion tokens that may be used over the course of the + * run. The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_completion_tokens?: number | null; + + /** + * The maximum number of prompt tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of prompt tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * prompt tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_prompt_tokens?: number | null; + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful * for storing additional information about the object in a structured format. Keys @@ -885,7 +1232,45 @@ export interface RunStreamParams { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: string | null; + model?: + | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613' + | null; + + /** + * Specifies the format that the model must output. 
Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: ThreadsAPI.AssistantResponseFormatOption | null; /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -894,11 +1279,23 @@ export interface RunStreamParams { */ temperature?: number | null; + /** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. + * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ + tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. 
*/ tools?: Array | null; + + truncation_strategy?: RunStreamParams.TruncationStrategy | null; } export namespace RunStreamParams { @@ -934,6 +1331,22 @@ export namespace RunStreamParams { */ metadata?: unknown | null; } + + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } } export type RunSubmitToolOutputsParams = diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 1b4b3f7d5..29682c308 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -116,6 +116,66 @@ export class Threads extends APIResource { } } +/** + * An object describing the expected output of the model. If `json_object` only + * `function` type `tools` are allowed to be passed to the Run. If `text` the model + * can return text or any value needed. + */ +export interface AssistantResponseFormat { + /** + * Must be one of `text` or `json_object`. + */ + type?: 'text' | 'json_object'; +} + +/** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. 
Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ +export type AssistantResponseFormatOption = 'none' | 'auto' | AssistantResponseFormat; + +/** + * Specifies a tool the model should use. Use to force the model to call a specific + * tool. + */ +export interface AssistantToolChoice { + /** + * The type of the tool. If type is `function`, the function name must be set + */ + type: 'function' | 'code_interpreter' | 'retrieval'; + + function?: AssistantToolChoiceFunction; +} + +export interface AssistantToolChoiceFunction { + /** + * The name of the function to call. + */ + name: string; +} + +/** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. + * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ +export type AssistantToolChoiceOption = 'none' | 'auto' | AssistantToolChoice; + /** * Represents a thread that contains * [messages](https://platform.openai.com/docs/api-reference/messages). @@ -232,6 +292,24 @@ export interface ThreadCreateAndRunParamsBase { */ instructions?: string | null; + /** + * The maximum number of completion tokens that may be used over the course of the + * run. The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `complete`. 
See + * `incomplete_details` for more info. + */ + max_completion_tokens?: number | null; + + /** + * The maximum number of prompt tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of prompt tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * prompt tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_prompt_tokens?: number | null; + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful * for storing additional information about the object in a structured format. Keys @@ -246,7 +324,45 @@ export interface ThreadCreateAndRunParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: string | null; + model?: + | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613' + | null; + + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. 
Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: AssistantResponseFormatOption | null; /** * If `true`, returns a stream of events that happen during the Run as server-sent @@ -267,6 +383,16 @@ export interface ThreadCreateAndRunParamsBase { */ thread?: ThreadCreateAndRunParams.Thread; + /** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. + * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ + tool_choice?: AssistantToolChoiceOption | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. @@ -274,6 +400,8 @@ export interface ThreadCreateAndRunParamsBase { tools?: Array< AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool > | null; + + truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null; } export namespace ThreadCreateAndRunParams { @@ -331,6 +459,22 @@ export namespace ThreadCreateAndRunParams { } } + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. 
+ */ + last_messages?: number | null; + } + export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; } @@ -367,6 +511,24 @@ export interface ThreadCreateAndRunPollParams { */ instructions?: string | null; + /** + * The maximum number of completion tokens that may be used over the course of the + * run. The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_completion_tokens?: number | null; + + /** + * The maximum number of prompt tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of prompt tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * prompt tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_prompt_tokens?: number | null; + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful * for storing additional information about the object in a structured format. Keys @@ -381,7 +543,45 @@ export interface ThreadCreateAndRunPollParams { * model associated with the assistant. If not, the model associated with the * assistant will be used. 
*/ - model?: string | null; + model?: + | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613' + | null; + + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: AssistantResponseFormatOption | null; /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -395,6 +595,16 @@ export interface ThreadCreateAndRunPollParams { */ thread?: ThreadCreateAndRunPollParams.Thread; + /** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. 
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ + tool_choice?: AssistantToolChoiceOption | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. @@ -402,6 +612,8 @@ export interface ThreadCreateAndRunPollParams { tools?: Array< AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool > | null; + + truncation_strategy?: ThreadCreateAndRunPollParams.TruncationStrategy | null; } export namespace ThreadCreateAndRunPollParams { @@ -458,6 +670,22 @@ export namespace ThreadCreateAndRunPollParams { metadata?: unknown | null; } } + + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } } export interface ThreadCreateAndRunStreamParams { @@ -474,6 +702,24 @@ export interface ThreadCreateAndRunStreamParams { */ instructions?: string | null; + /** + * The maximum number of completion tokens that may be used over the course of the + * run. The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_completion_tokens?: number | null; + + /** + * The maximum number of prompt tokens that may be used over the course of the run. 
+ * The run will make a best effort to use only the number of prompt tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * prompt tokens specified, the run will end with status `complete`. See + * `incomplete_details` for more info. + */ + max_prompt_tokens?: number | null; + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful * for storing additional information about the object in a structured format. Keys @@ -488,7 +734,45 @@ export interface ThreadCreateAndRunStreamParams { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: string | null; + model?: + | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613' + | null; + + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. 
Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: AssistantResponseFormatOption | null; /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will @@ -502,6 +786,16 @@ export interface ThreadCreateAndRunStreamParams { */ thread?: ThreadCreateAndRunStreamParams.Thread; + /** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tools and instead generates a message. `auto` is the default value + * and means the model can pick between generating a message or calling a tool. + * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + */ + tool_choice?: AssistantToolChoiceOption | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. @@ -509,6 +803,8 @@ export interface ThreadCreateAndRunStreamParams { tools?: Array< AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool > | null; + + truncation_strategy?: ThreadCreateAndRunStreamParams.TruncationStrategy | null; } export namespace ThreadCreateAndRunStreamParams { @@ -565,9 +861,30 @@ export namespace ThreadCreateAndRunStreamParams { metadata?: unknown | null; } } + + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. 
+ */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } } export namespace Threads { + export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat; + export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; + export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; + export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; + export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption; export import Thread = ThreadsAPI.Thread; export import ThreadDeleted = ThreadsAPI.ThreadDeleted; export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 8119639f2..2288265ea 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -667,6 +667,8 @@ export interface ChatCompletionCreateParamsBase { */ model: | (string & {}) + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' @@ -730,8 +732,7 @@ export interface ChatCompletionCreateParamsBase { /** * Whether to return log probabilities of the output tokens or not. If true, * returns the log probabilities of each output token returned in the `content` of - * `message`. This option is currently not available on the `gpt-4-vision-preview` - * model. + * `message`. */ logprobs?: boolean | null; diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index e62f8f09c..c8d688b0c 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from 'openai/resource'; -import * as JobsAPI from 'openai/resources/fine-tuning/jobs'; +import * as JobsAPI from 'openai/resources/fine-tuning/jobs/jobs'; export class FineTuning extends APIResource { jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); @@ -11,6 +11,9 @@ export namespace FineTuning { export import Jobs = JobsAPI.Jobs; export import FineTuningJob = JobsAPI.FineTuningJob; export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent; + export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration; + export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration; + export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject; export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage; export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage; export import JobCreateParams = JobsAPI.JobCreateParams; diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index 2885f62f4..1d8739a0a 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -4,10 +4,13 @@ export { FineTuning } from './fine-tuning'; export { FineTuningJob, FineTuningJobEvent, + FineTuningJobIntegration, + FineTuningJobWandbIntegration, + FineTuningJobWandbIntegrationObject, JobCreateParams, JobListParams, JobListEventsParams, FineTuningJobsPage, FineTuningJobEventsPage, Jobs, -} from './jobs'; +} from './jobs/index'; diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts new file mode 100644 index 000000000..468cb3001 --- /dev/null +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -0,0 +1,108 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import * as Core from 'openai/core'; +import { APIResource } from 'openai/resource'; +import { isRequestOptions } from 'openai/core'; +import * as CheckpointsAPI from 'openai/resources/fine-tuning/jobs/checkpoints'; +import { CursorPage, type CursorPageParams } from 'openai/pagination'; + +export class Checkpoints extends APIResource { + /** + * List checkpoints for a fine-tuning job. + */ + list( + fineTuningJobId: string, + query?: CheckpointListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + fineTuningJobId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + fineTuningJobId: string, + query: CheckpointListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(fineTuningJobId, {}, query); + } + return this._client.getAPIList( + `/fine_tuning/jobs/${fineTuningJobId}/checkpoints`, + FineTuningJobCheckpointsPage, + { query, ...options }, + ); + } +} + +export class FineTuningJobCheckpointsPage extends CursorPage {} + +/** + * The `fine_tuning.job.checkpoint` object represents a model checkpoint for a + * fine-tuning job that is ready to use. + */ +export interface FineTuningJobCheckpoint { + /** + * The checkpoint identifier, which can be referenced in the API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the checkpoint was created. + */ + created_at: number; + + /** + * The name of the fine-tuned checkpoint model that is created. + */ + fine_tuned_model_checkpoint: string; + + /** + * The name of the fine-tuning job that this checkpoint was created from. + */ + fine_tuning_job_id: string; + + /** + * Metrics at the step number during the fine-tuning job. + */ + metrics: FineTuningJobCheckpoint.Metrics; + + /** + * The object type, which is always "fine_tuning.job.checkpoint". + */ + object: 'fine_tuning.job.checkpoint'; + + /** + * The step number that the checkpoint was created at. 
+ */ + step_number: number; +} + +export namespace FineTuningJobCheckpoint { + /** + * Metrics at the step number during the fine-tuning job. + */ + export interface Metrics { + full_valid_loss?: number; + + full_valid_mean_token_accuracy?: number; + + step?: number; + + train_loss?: number; + + train_mean_token_accuracy?: number; + + valid_loss?: number; + + valid_mean_token_accuracy?: number; + } +} + +export interface CheckpointListParams extends CursorPageParams {} + +export namespace Checkpoints { + export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint; + export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage; + export import CheckpointListParams = CheckpointsAPI.CheckpointListParams; +} diff --git a/src/resources/fine-tuning/jobs/index.ts b/src/resources/fine-tuning/jobs/index.ts new file mode 100644 index 000000000..275c776e9 --- /dev/null +++ b/src/resources/fine-tuning/jobs/index.ts @@ -0,0 +1,21 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + FineTuningJob, + FineTuningJobEvent, + FineTuningJobIntegration, + FineTuningJobWandbIntegration, + FineTuningJobWandbIntegrationObject, + JobCreateParams, + JobListParams, + JobListEventsParams, + FineTuningJobsPage, + FineTuningJobEventsPage, + Jobs, +} from './jobs'; +export { + FineTuningJobCheckpoint, + CheckpointListParams, + FineTuningJobCheckpointsPage, + Checkpoints, +} from './checkpoints'; diff --git a/src/resources/fine-tuning/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts similarity index 66% rename from src/resources/fine-tuning/jobs.ts rename to src/resources/fine-tuning/jobs/jobs.ts index eb77405ca..10b3d38d2 100644 --- a/src/resources/fine-tuning/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -3,10 +3,13 @@ import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; import { isRequestOptions } from 'openai/core'; -import * as JobsAPI from 'openai/resources/fine-tuning/jobs'; +import * as JobsAPI from 'openai/resources/fine-tuning/jobs/jobs'; +import * as CheckpointsAPI from 'openai/resources/fine-tuning/jobs/checkpoints'; import { CursorPage, type CursorPageParams } from 'openai/pagination'; export class Jobs extends APIResource { + checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client); + /** * Creates a fine-tuning job which begins the process of creating a new model from * a given dataset. @@ -147,6 +150,11 @@ export interface FineTuningJob { */ result_files: Array; + /** + * The seed used for the fine-tuning job. + */ + seed: number; + /** * The current status of the fine-tuning job, which can be either * `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. @@ -171,6 +179,11 @@ export interface FineTuningJob { * [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). */ validation_file: string | null; + + /** + * A list of integrations to enable for this fine-tuning job. 
+ */ + integrations?: Array | null; } export namespace FineTuningJob { @@ -227,6 +240,56 @@ export interface FineTuningJobEvent { object: 'fine_tuning.job.event'; } +export type FineTuningJobIntegration = FineTuningJobWandbIntegrationObject; + +/** + * The settings for your integration with Weights and Biases. This payload + * specifies the project that metrics will be sent to. Optionally, you can set an + * explicit display name for your run, add tags to your run, and set a default + * entity (team, username, etc) to be associated with your run. + */ +export interface FineTuningJobWandbIntegration { + /** + * The name of the project that the new run will be created under. + */ + project: string; + + /** + * The entity to use for the run. This allows you to set the team or username of + * the WandB user that you would like associated with the run. If not set, the + * default entity for the registered WandB API key is used. + */ + entity?: string | null; + + /** + * A display name to set for the run. If not set, we will use the Job ID as the + * name. + */ + name?: string | null; + + /** + * A list of tags to be attached to the newly created run. These tags are passed + * through directly to WandB. Some default tags are generated by OpenAI: + * "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + */ + tags?: Array; +} + +export interface FineTuningJobWandbIntegrationObject { + /** + * The type of the integration being enabled for the fine-tuning job + */ + type: 'wandb'; + + /** + * The settings for your integration with Weights and Biases. This payload + * specifies the project that metrics will be sent to. Optionally, you can set an + * explicit display name for your run, add tags to your run, and set a default + * entity (team, username, etc) to be associated with your run. + */ + wandb: FineTuningJobWandbIntegration; +} + export interface JobCreateParams { /** * The name of the model to fine-tune. 
You can select one of the @@ -253,6 +316,18 @@ export interface JobCreateParams { */ hyperparameters?: JobCreateParams.Hyperparameters; + /** + * A list of integrations to enable for your fine-tuning job. + */ + integrations?: Array | null; + + /** + * The seed controls the reproducibility of the job. Passing in the same seed and + * job parameters should produce the same results, but may differ in rare cases. If + * a seed is not specified, one will be generated for you. + */ + seed?: number | null; + /** * A string of up to 18 characters that will be added to your fine-tuned model * name. @@ -302,6 +377,57 @@ export namespace JobCreateParams { */ n_epochs?: 'auto' | number; } + + export interface Integration { + /** + * The type of integration to enable. Currently, only "wandb" (Weights and Biases) + * is supported. + */ + type: 'wandb'; + + /** + * The settings for your integration with Weights and Biases. This payload + * specifies the project that metrics will be sent to. Optionally, you can set an + * explicit display name for your run, add tags to your run, and set a default + * entity (team, username, etc) to be associated with your run. + */ + wandb: Integration.Wandb; + } + + export namespace Integration { + /** + * The settings for your integration with Weights and Biases. This payload + * specifies the project that metrics will be sent to. Optionally, you can set an + * explicit display name for your run, add tags to your run, and set a default + * entity (team, username, etc) to be associated with your run. + */ + export interface Wandb { + /** + * The name of the project that the new run will be created under. + */ + project: string; + + /** + * The entity to use for the run. This allows you to set the team or username of + * the WandB user that you would like associated with the run. If not set, the + * default entity for the registered WandB API key is used. + */ + entity?: string | null; + + /** + * A display name to set for the run. 
If not set, we will use the Job ID as the + * name. + */ + name?: string | null; + + /** + * A list of tags to be attached to the newly created run. These tags are passed + * through directly to WandB. Some default tags are generated by OpenAI: + * "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + */ + tags?: Array; + } + } } export interface JobListParams extends CursorPageParams {} @@ -311,9 +437,16 @@ export interface JobListEventsParams extends CursorPageParams {} export namespace Jobs { export import FineTuningJob = JobsAPI.FineTuningJob; export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent; + export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration; + export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration; + export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject; export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage; export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage; export import JobCreateParams = JobsAPI.JobCreateParams; export import JobListParams = JobsAPI.JobListParams; export import JobListEventsParams = JobsAPI.JobListEventsParams; + export import Checkpoints = CheckpointsAPI.Checkpoints; + export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint; + export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage; + export import CheckpointListParams = CheckpointsAPI.CheckpointListParams; } diff --git a/tests/api-resources/beta/assistants/assistants.test.ts b/tests/api-resources/beta/assistants/assistants.test.ts index b11075d06..62282148d 100644 --- a/tests/api-resources/beta/assistants/assistants.test.ts +++ b/tests/api-resources/beta/assistants/assistants.test.ts @@ -10,7 +10,7 @@ const openai = new OpenAI({ describe('resource assistants', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.assistants.create({ model: 
'string' }); + const responsePromise = openai.beta.assistants.create({ model: 'gpt-4-turbo' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -22,7 +22,7 @@ describe('resource assistants', () => { test('create: required and optional params', async () => { const response = await openai.beta.assistants.create({ - model: 'string', + model: 'gpt-4-turbo', description: 'string', file_ids: ['string', 'string', 'string'], instructions: 'string', diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 2911cfd53..2489d56e2 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -30,11 +30,16 @@ describe('resource runs', () => { { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, ], instructions: 'string', + max_completion_tokens: 256, + max_prompt_tokens: 256, metadata: {}, - model: 'string', + model: 'gpt-4-turbo', + response_format: 'none', stream: false, temperature: 1, + tool_choice: 'none', tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + truncation_strategy: { type: 'auto', last_messages: 1 }, }); }); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 3606019bd..028a150f4 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -106,8 +106,11 @@ describe('resource threads', () => { const response = await openai.beta.threads.createAndRun({ assistant_id: 'string', instructions: 'string', + max_completion_tokens: 256, + max_prompt_tokens: 256, metadata: {}, - model: 'string', + model: 'gpt-4-turbo', + response_format: 'none', stream: false, temperature: 1, thread: { @@ -118,7 +121,9 @@ describe('resource threads', () => { ], metadata: {}, }, + 
tool_choice: 'none', tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + truncation_strategy: { type: 'auto', last_messages: 1 }, }); }); }); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index e0ccb3910..bd398b91d 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -12,7 +12,7 @@ describe('resource completions', () => { test('create: only required params', async () => { const responsePromise = openai.chat.completions.create({ messages: [{ content: 'string', role: 'system' }], - model: 'gpt-3.5-turbo', + model: 'gpt-4-turbo', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -26,7 +26,7 @@ describe('resource completions', () => { test('create: required and optional params', async () => { const response = await openai.chat.completions.create({ messages: [{ content: 'string', role: 'system', name: 'string' }], - model: 'gpt-3.5-turbo', + model: 'gpt-4-turbo', frequency_penalty: -2, function_call: 'none', functions: [{ description: 'string', name: 'string', parameters: { foo: 'bar' } }], diff --git a/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts b/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts new file mode 100644 index 000000000..1844d7c87 --- /dev/null +++ b/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts @@ -0,0 +1,42 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const openai = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource checkpoints', () => { + test('list', async () => { + const responsePromise = openai.fineTuning.jobs.checkpoints.list('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.fineTuning.jobs.checkpoints.list('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.fineTuning.jobs.checkpoints.list( + 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + { after: 'string', limit: 0 }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/fine-tuning/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts similarity index 87% rename from tests/api-resources/fine-tuning/jobs.test.ts rename to tests/api-resources/fine-tuning/jobs/jobs.test.ts index d8f230abd..d2207cd97 100644 --- a/tests/api-resources/fine-tuning/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -28,6 +28,36 @@ describe('resource jobs', () => { model: 'gpt-3.5-turbo', training_file: 'file-abc123', hyperparameters: { batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' }, + integrations: [ 
+ { + type: 'wandb', + wandb: { + project: 'my-wandb-project', + name: 'string', + entity: 'string', + tags: ['custom-tag', 'custom-tag', 'custom-tag'], + }, + }, + { + type: 'wandb', + wandb: { + project: 'my-wandb-project', + name: 'string', + entity: 'string', + tags: ['custom-tag', 'custom-tag', 'custom-tag'], + }, + }, + { + type: 'wandb', + wandb: { + project: 'my-wandb-project', + name: 'string', + entity: 'string', + tags: ['custom-tag', 'custom-tag', 'custom-tag'], + }, + }, + ], + seed: 42, suffix: 'x', validation_file: 'file-abc123', }); From 0c75bbdb2022a5acf4e4b5e2997854f7784e46b7 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:09:44 -0400 Subject: [PATCH 060/533] feat(api): add batch API (#768) https://platform.openai.com/docs/api-reference/batch/create --- .stats.yml | 2 +- api.md | 14 ++ src/index.ts | 7 + src/resources/batches.ts | 225 ++++++++++++++++++++++++++++ src/resources/index.ts | 1 + tests/api-resources/batches.test.ts | 71 +++++++++ 6 files changed, 319 insertions(+), 1 deletion(-) create mode 100644 src/resources/batches.ts create mode 100644 tests/api-resources/batches.test.ts diff --git a/.stats.yml b/.stats.yml index 284caebf4..47c2bce1c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1 +1 @@ -configured_endpoints: 52 +configured_endpoints: 55 diff --git a/api.md b/api.md index c6a2bf273..02030dc07 100644 --- a/api.md +++ b/api.md @@ -337,3 +337,17 @@ Methods: - client.beta.threads.messages.files.retrieve(threadId, messageId, fileId) -> MessageFile - client.beta.threads.messages.files.list(threadId, messageId, { ...params }) -> MessageFilesPage + +# Batches + +Types: + +- Batch +- BatchError +- BatchRequestCounts + +Methods: + +- client.batches.create({ ...params }) -> Batch +- client.batches.retrieve(batchId) -> Batch +- client.batches.cancel(batchId) -> Batch diff --git a/src/index.ts b/src/index.ts index 9a2b2eaad..84fdd3979 100644 --- a/src/index.ts +++ 
b/src/index.ts @@ -150,6 +150,7 @@ export class OpenAI extends Core.APIClient { models: API.Models = new API.Models(this); fineTuning: API.FineTuning = new API.FineTuning(this); beta: API.Beta = new API.Beta(this); + batches: API.Batches = new API.Batches(this); protected override defaultQuery(): Core.DefaultQuery | undefined { return this._options.defaultQuery; @@ -285,6 +286,12 @@ export namespace OpenAI { export import Beta = API.Beta; + export import Batches = API.Batches; + export import Batch = API.Batch; + export import BatchError = API.BatchError; + export import BatchRequestCounts = API.BatchRequestCounts; + export import BatchCreateParams = API.BatchCreateParams; + export import ErrorObject = API.ErrorObject; export import FunctionDefinition = API.FunctionDefinition; export import FunctionParameters = API.FunctionParameters; diff --git a/src/resources/batches.ts b/src/resources/batches.ts new file mode 100644 index 000000000..75b491a16 --- /dev/null +++ b/src/resources/batches.ts @@ -0,0 +1,225 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import * as Core from 'openai/core'; +import { APIResource } from 'openai/resource'; +import * as BatchesAPI from 'openai/resources/batches'; + +export class Batches extends APIResource { + /** + * Creates and executes a batch from an uploaded file of requests + */ + create(body: BatchCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/batches', { body, ...options }); + } + + /** + * Retrieves a batch. + */ + retrieve(batchId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/batches/${batchId}`, options); + } + + /** + * Cancels an in-progress batch. 
+ */ + cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/batches/${batchId}/cancel`, options); + } +} + +export interface Batch { + id: string; + + /** + * The time frame within which the batch should be processed. + */ + completion_window: string; + + /** + * The Unix timestamp (in seconds) for when the batch was created. + */ + created_at: string; + + /** + * The OpenAI API endpoint used by the batch. + */ + endpoint: string; + + /** + * The ID of the input file for the batch. + */ + input_file_id: string; + + /** + * The object type, which is always `batch`. + */ + object: 'batch'; + + /** + * The current status of the batch. + */ + status: + | 'validating' + | 'failed' + | 'in_progress' + | 'finalizing' + | 'completed' + | 'expired' + | 'cancelling' + | 'cancelled'; + + /** + * The Unix timestamp (in seconds) for when the batch was cancelled. + */ + cancelled_at?: string; + + /** + * The Unix timestamp (in seconds) for when the batch started cancelling. + */ + cancelling_at?: string; + + /** + * The Unix timestamp (in seconds) for when the batch was completed. + */ + completed_at?: string; + + /** + * The ID of the file containing the outputs of requests with errors. + */ + error_file_id?: string; + + errors?: Batch.Errors; + + /** + * The Unix timestamp (in seconds) for when the batch expired. + */ + expired_at?: string; + + /** + * The Unix timestamp (in seconds) for when the batch will expire. + */ + expires_at?: string; + + /** + * The Unix timestamp (in seconds) for when the batch failed. + */ + failed_at?: string; + + /** + * The Unix timestamp (in seconds) for when the batch started finalizing. + */ + finalizing_at?: string; + + /** + * The Unix timestamp (in seconds) for when the batch started processing. + */ + in_progress_at?: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The ID of the file containing the outputs of successfully executed requests. + */ + output_file_id?: string; + + /** + * The request counts for different statuses within the batch. + */ + request_counts?: BatchRequestCounts; +} + +export namespace Batch { + export interface Errors { + data?: Array; + + /** + * The object type, which is always `list`. + */ + object?: string; + } +} + +export interface BatchError { + /** + * An error code identifying the error type. + */ + code?: string; + + /** + * The line number of the input file where the error occurred, if applicable. + */ + line?: number | null; + + /** + * A human-readable message providing more details about the error. + */ + message?: string; + + /** + * The name of the parameter that caused the error, if applicable. + */ + param?: string | null; +} + +/** + * The request counts for different statuses within the batch. + */ +export interface BatchRequestCounts { + /** + * Number of requests that have been completed successfully. + */ + completed: number; + + /** + * Number of requests that have failed. + */ + failed: number; + + /** + * Total number of requests in the batch. + */ + total: number; +} + +export interface BatchCreateParams { + /** + * The time frame within which the batch should be processed. Currently only `24h` + * is supported. + */ + completion_window: '24h'; + + /** + * The endpoint to be used for all requests in the batch. Currently only + * `/v1/chat/completions` is supported. + */ + endpoint: '/v1/chat/completions'; + + /** + * The ID of an uploaded file that contains requests for the new batch. + * + * See [upload file](https://platform.openai.com/docs/api-reference/files/create) + * for how to upload a file. 
+ * + * Your input file must be formatted as a JSONL file, and must be uploaded with the + * purpose `batch`. + */ + input_file_id: string; + + /** + * Optional custom metadata for the batch. + */ + metadata?: Record | null; +} + +export namespace Batches { + export import Batch = BatchesAPI.Batch; + export import BatchError = BatchesAPI.BatchError; + export import BatchRequestCounts = BatchesAPI.BatchRequestCounts; + export import BatchCreateParams = BatchesAPI.BatchCreateParams; +} diff --git a/src/resources/index.ts b/src/resources/index.ts index a9741f5fd..282e57ea1 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -3,6 +3,7 @@ export * from './chat/index'; export * from './shared'; export { Audio } from './audio/audio'; +export { Batch, BatchError, BatchRequestCounts, BatchCreateParams, Batches } from './batches'; export { Beta } from './beta/beta'; export { Completion, diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts new file mode 100644 index 000000000..e4a9015d1 --- /dev/null +++ b/tests/api-resources/batches.test.ts @@ -0,0 +1,71 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const openai = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource batches', () => { + test('create: only required params', async () => { + const responsePromise = openai.batches.create({ + completion_window: '24h', + endpoint: '/v1/chat/completions', + input_file_id: 'string', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await openai.batches.create({ + completion_window: '24h', + endpoint: '/v1/chat/completions', + input_file_id: 'string', + metadata: { foo: 'string' }, + }); + }); + + test('retrieve', async () => { + const responsePromise = openai.batches.retrieve('string'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(openai.batches.retrieve('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('cancel', async () => { + const responsePromise = openai.batches.cancel('string'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + 
const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(openai.batches.cancel('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); +}); From 56f4821ac2f86e60231c31c1e007d68176100c7c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:10:04 -0400 Subject: [PATCH 061/533] release: 4.34.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index bd6b3284c..80372a7f2 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.33.1" + ".": "4.34.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f3067e694..4a253da33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.34.0 (2024-04-15) + +Full Changelog: [v4.33.1...v4.34.0](https://github.com/openai/openai-node/compare/v4.33.1...v4.34.0) + +### Features + +* **api:** add batch API ([#768](https://github.com/openai/openai-node/issues/768)) ([7fe34f2](https://github.com/openai/openai-node/commit/7fe34f2d0bda9c1cb116a593f02bd0cc15a52e12)) +* **api:** updates ([#766](https://github.com/openai/openai-node/issues/766)) ([52bcc47](https://github.com/openai/openai-node/commit/52bcc47043e4c3ffe15ae9e7ac0fa87e2493aad9)) + ## 4.33.1 (2024-04-12) Full Changelog: [v4.33.0...v4.33.1](https://github.com/openai/openai-node/compare/v4.33.0...v4.33.1) diff --git a/README.md b/README.md index 
2d1ae6089..774fd6b76 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.33.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.34.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index c06cd3bcf..d9e64064c 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.33.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.34.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 998b6a2c7..a821e7f72 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.33.1", + "version": "4.34.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 0d8f2ffd7..3577d3d22 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.33.1'; // x-release-please-version +export const VERSION = '4.34.0'; // x-release-please-version From 8cdd7ea9a28455c84f2babaea998fde228287146 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 15 Apr 2024 17:33:50 -0400 Subject: [PATCH 062/533] feat(errors): add request_id property (#769) --- src/error.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/error.ts b/src/error.ts index deac34c5d..19a60598a 100644 --- a/src/error.ts +++ b/src/error.ts @@ -13,6 +13,8 @@ export class APIError extends OpenAIError { readonly param: string | null | undefined; readonly type: string | undefined; + readonly request_id: string | null | undefined; + constructor( status: number | undefined, error: Object | undefined, @@ -22,6 +24,7 @@ export class APIError extends OpenAIError { 
super(`${APIError.makeMessage(status, error, message)}`); this.status = status; this.headers = headers; + this.request_id = headers?.['x-request-id']; const data = error as Record; this.error = data; From 116e38aae33a2d7b88c27d783a95b41e56500600 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 15 Apr 2024 17:34:10 -0400 Subject: [PATCH 063/533] release: 4.35.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 80372a7f2..c63d8fd43 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.34.0" + ".": "4.35.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a253da33..48a52d258 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.35.0 (2024-04-15) + +Full Changelog: [v4.34.0...v4.35.0](https://github.com/openai/openai-node/compare/v4.34.0...v4.35.0) + +### Features + +* **errors:** add request_id property ([#769](https://github.com/openai/openai-node/issues/769)) ([43aa6a1](https://github.com/openai/openai-node/commit/43aa6a19cfb1448903dfaddc4da3def2eda9cbab)) + ## 4.34.0 (2024-04-15) Full Changelog: [v4.33.1...v4.34.0](https://github.com/openai/openai-node/compare/v4.33.1...v4.34.0) diff --git a/README.md b/README.md index 774fd6b76..c32fcfcd9 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.34.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.35.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index d9e64064c..ffdb2cb9d 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI 
from "https://deno.land/x/openai@v4.34.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.35.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index a821e7f72..d57fe15cd 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.34.0", + "version": "4.35.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 3577d3d22..7ca672a0d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.34.0'; // x-release-please-version +export const VERSION = '4.35.0'; // x-release-please-version From 7fa4400668977e4265bd26591a4712546e54892f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 16 Apr 2024 09:17:37 -0400 Subject: [PATCH 064/533] feat(client): add header OpenAI-Project (#772) --- src/index.ts | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/index.ts b/src/index.ts index 84fdd3979..91267cfc0 100644 --- a/src/index.ts +++ b/src/index.ts @@ -18,6 +18,11 @@ export interface ClientOptions { */ organization?: string | null | undefined; + /** + * Defaults to process.env['OPENAI_PROJECT_ID']. + */ + project?: string | null | undefined; + /** * Override the default base URL for the API, e.g., "https://api.example.com/v2/" * @@ -85,6 +90,7 @@ export interface ClientOptions { export class OpenAI extends Core.APIClient { apiKey: string; organization: string | null; + project: string | null; private _options: ClientOptions; @@ -93,6 +99,7 @@ export class OpenAI extends Core.APIClient { * * @param {string | undefined} [opts.apiKey=process.env['OPENAI_API_KEY'] ?? undefined] * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? 
null] + * @param {string | null | undefined} [opts.project=process.env['OPENAI_PROJECT_ID'] ?? null] * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL'] ?? https://api.openai.com/v1] - Override the default base URL for the API. * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections. @@ -106,6 +113,7 @@ export class OpenAI extends Core.APIClient { baseURL = Core.readEnv('OPENAI_BASE_URL'), apiKey = Core.readEnv('OPENAI_API_KEY'), organization = Core.readEnv('OPENAI_ORG_ID') ?? null, + project = Core.readEnv('OPENAI_PROJECT_ID') ?? null, ...opts }: ClientOptions = {}) { if (apiKey === undefined) { @@ -117,6 +125,7 @@ export class OpenAI extends Core.APIClient { const options: ClientOptions = { apiKey, organization, + project, ...opts, baseURL: baseURL || `https://api.openai.com/v1`, }; @@ -138,6 +147,7 @@ export class OpenAI extends Core.APIClient { this.apiKey = apiKey; this.organization = organization; + this.project = project; } completions: API.Completions = new API.Completions(this); @@ -160,6 +170,7 @@ export class OpenAI extends Core.APIClient { return { ...super.defaultHeaders(opts), 'OpenAI-Organization': this.organization, + 'OpenAI-Project': this.project, ...this._options.defaultHeaders, }; } From be0dd9f7b5341adbb32ae3f55853405d6c4039f0 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 16 Apr 2024 11:37:59 -0400 Subject: [PATCH 065/533] build: configure UTF-8 locale in devcontainer (#774) --- .devcontainer/Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index d03365a2b..8ea34be96 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -7,6 +7,10 @@ RUN apt-get update && apt-get install -y \ yarnpkg \ && apt-get clean autoclean +# 
Ensure UTF-8 encoding +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 + # Yarn RUN ln -sf /usr/bin/yarnpkg /usr/bin/yarn From 9dfc744169262d1a57a7be0453780dd77a726b6f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 16 Apr 2024 12:54:04 -0400 Subject: [PATCH 066/533] feat: extract chat models to a named enum (#775) --- api.md | 4 ++++ src/index.ts | 1 + src/resources/chat/chat.ts | 23 +++++++++++++++++++++++ src/resources/chat/completions.ts | 23 ++--------------------- src/resources/chat/index.ts | 2 +- 5 files changed, 31 insertions(+), 22 deletions(-) diff --git a/api.md b/api.md index 02030dc07..7557ce133 100644 --- a/api.md +++ b/api.md @@ -20,6 +20,10 @@ Methods: # Chat +Types: + +- ChatModel + ## Completions Types: diff --git a/src/index.ts b/src/index.ts index 91267cfc0..7a776b2c1 100644 --- a/src/index.ts +++ b/src/index.ts @@ -238,6 +238,7 @@ export namespace OpenAI { export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; export import Chat = API.Chat; + export import ChatModel = API.ChatModel; export import ChatCompletion = API.ChatCompletion; export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; export import ChatCompletionChunk = API.ChatCompletionChunk; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 6c7bccb22..fa681ed64 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,13 +1,36 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from 'openai/resource'; +import * as ChatAPI from 'openai/resources/chat/chat'; import * as CompletionsAPI from 'openai/resources/chat/completions'; export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); } +export type ChatModel = + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0301' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; + export namespace Chat { + export import ChatModel = ChatAPI.ChatModel; export import Completions = CompletionsAPI.Completions; export import ChatCompletion = CompletionsAPI.ChatCompletion; export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 2288265ea..b9672f52b 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -6,6 +6,7 @@ import { APIResource } from 'openai/resource'; import * as ChatCompletionsAPI from 'openai/resources/chat/completions'; import * as CompletionsAPI from 'openai/resources/completions'; import * as Shared from 'openai/resources/shared'; +import * as ChatAPI from 'openai/resources/chat/chat'; import { Stream } from 'openai/streaming'; export class Completions extends APIResource { @@ -665,27 +666,7 @@ export interface ChatCompletionCreateParamsBase { * [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) * table for details on which models work with the Chat API. 
*/ - model: - | (string & {}) - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0301' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613'; + model: (string & {}) | ChatAPI.ChatModel; /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 78a7516ed..ef72bbbc9 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,6 +1,5 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { Chat } from './chat'; export { ChatCompletion, ChatCompletionAssistantMessageParam, @@ -30,3 +29,4 @@ export { CompletionCreateParamsStreaming, Completions, } from './completions'; +export { ChatModel, Chat } from './chat'; From 6f72e7ad3e4e151c9334f4449d1c3555255c2793 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 16 Apr 2024 12:54:23 -0400 Subject: [PATCH 067/533] release: 4.36.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c63d8fd43..c1ce2c41b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.35.0" + ".": "4.36.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 48a52d258..3ddd03a8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.36.0 (2024-04-16) + +Full Changelog: 
[v4.35.0...v4.36.0](https://github.com/openai/openai-node/compare/v4.35.0...v4.36.0) + +### Features + +* **client:** add header OpenAI-Project ([#772](https://github.com/openai/openai-node/issues/772)) ([bb4df37](https://github.com/openai/openai-node/commit/bb4df3722082fb44b7d4feb7a47df796149150a2)) +* extract chat models to a named enum ([#775](https://github.com/openai/openai-node/issues/775)) ([141d2ed](https://github.com/openai/openai-node/commit/141d2ed308141dc751869353208e4d0632d3650c)) + + +### Build System + +* configure UTF-8 locale in devcontainer ([#774](https://github.com/openai/openai-node/issues/774)) ([bebf4f0](https://github.com/openai/openai-node/commit/bebf4f0ca1f884f8747caff0f0e065aafffde096)) + ## 4.35.0 (2024-04-15) Full Changelog: [v4.34.0...v4.35.0](https://github.com/openai/openai-node/compare/v4.34.0...v4.35.0) diff --git a/README.md b/README.md index c32fcfcd9..406434e6d 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.35.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.36.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index ffdb2cb9d..6389062ec 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.35.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.36.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index d57fe15cd..e848ce857 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.35.0", + "version": "4.36.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 7ca672a0d..460925cae 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 
@@ -export const VERSION = '4.35.0'; // x-release-please-version +export const VERSION = '4.36.0'; // x-release-please-version From 0a1234dde22618ceb88954a8e480b6715b36f5b7 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:34:49 -0400 Subject: [PATCH 068/533] feat(api): add vector stores (#776) --- .stats.yml | 2 +- README.md | 16 +- api.md | 156 +++-- helpers.md | 23 +- src/lib/AssistantStream.ts | 4 +- src/lib/Util.ts | 23 + .../beta/{assistants => }/assistants.ts | 298 +++++++-- src/resources/beta/assistants/files.ts | 154 ----- src/resources/beta/assistants/index.ts | 28 - src/resources/beta/beta.ts | 13 +- src/resources/beta/index.ts | 13 +- src/resources/beta/threads/index.ts | 2 +- .../beta/threads/{messages => }/messages.ts | 71 +- src/resources/beta/threads/messages/files.ts | 105 --- src/resources/beta/threads/messages/index.ts | 30 - src/resources/beta/threads/runs/index.ts | 4 +- src/resources/beta/threads/runs/runs.ts | 130 ++-- src/resources/beta/threads/runs/steps.ts | 98 +-- src/resources/beta/threads/threads.ts | 608 ++++++++++++++++-- .../beta/vector-stores/file-batches.ts | 292 +++++++++ src/resources/beta/vector-stores/files.ts | 277 ++++++++ src/resources/beta/vector-stores/index.ts | 25 + .../beta/vector-stores/vector-stores.ts | 318 +++++++++ src/resources/fine-tuning/jobs/jobs.ts | 2 +- .../beta/{assistants => }/assistants.test.ts | 11 +- .../threads/{messages => }/messages.test.ts | 6 +- .../beta/threads/messages/files.test.ts | 65 -- .../beta/threads/runs/runs.test.ts | 34 +- .../beta/threads/threads.test.ts | 85 ++- .../beta/vector-stores/file-batches.test.ts | 98 +++ .../files.test.ts | 22 +- .../beta/vector-stores/vector-stores.test.ts | 97 +++ 32 files changed, 2420 insertions(+), 690 deletions(-) create mode 100644 src/lib/Util.ts rename src/resources/beta/{assistants => }/assistants.ts (74%) delete mode 100644 src/resources/beta/assistants/files.ts delete mode 
100644 src/resources/beta/assistants/index.ts rename src/resources/beta/threads/{messages => }/messages.ts (89%) delete mode 100644 src/resources/beta/threads/messages/files.ts delete mode 100644 src/resources/beta/threads/messages/index.ts create mode 100644 src/resources/beta/vector-stores/file-batches.ts create mode 100644 src/resources/beta/vector-stores/files.ts create mode 100644 src/resources/beta/vector-stores/index.ts create mode 100644 src/resources/beta/vector-stores/vector-stores.ts rename tests/api-resources/beta/{assistants => }/assistants.test.ts (93%) rename tests/api-resources/beta/threads/{messages => }/messages.test.ts (93%) delete mode 100644 tests/api-resources/beta/threads/messages/files.test.ts create mode 100644 tests/api-resources/beta/vector-stores/file-batches.test.ts rename tests/api-resources/beta/{assistants => vector-stores}/files.test.ts (77%) create mode 100644 tests/api-resources/beta/vector-stores/vector-stores.test.ts diff --git a/.stats.yml b/.stats.yml index 47c2bce1c..2814bb777 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1 +1 @@ -configured_endpoints: 55 +configured_endpoints: 62 diff --git a/README.md b/README.md index 406434e6d..b75320e78 100644 --- a/README.md +++ b/README.md @@ -102,7 +102,7 @@ Documentation for each method, request param, and response field are available i ### Polling Helpers -When interacting with the API some actions such as starting a Run may take time to complete. The SDK includes +When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes helper functions which will poll the status until it reaches a terminal state and then return the resulting object. If an API method results in an action which could benefit from polling there will be a corresponding version of the method ending in 'AndPoll'. 
@@ -117,6 +117,20 @@ const run = await openai.beta.threads.runs.createAndPoll(thread.id, { More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle) +### Bulk Upload Helpers + +When creating an interacting with vector stores, you can use the polling helpers to monitor the status of operations. +For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. + +```ts +const fileList = [ + createReadStream('/home/data/example.pdf'), + ... +]; + +const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, fileList); +``` + ### Streaming Helpers The SDK also includes helpers to process streams and handle the incoming events. diff --git a/api.md b/api.md index 7557ce133..8161fb2c7 100644 --- a/api.md +++ b/api.md @@ -179,53 +179,88 @@ Methods: # Beta -## Chat +## VectorStores -### Completions +Types: + +- VectorStore +- VectorStoreDeleted Methods: -- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.stream(body, options?) 
-> ChatCompletionStream +- client.beta.vectorStores.create({ ...params }) -> VectorStore +- client.beta.vectorStores.retrieve(vectorStoreId) -> VectorStore +- client.beta.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore +- client.beta.vectorStores.list({ ...params }) -> VectorStoresPage +- client.beta.vectorStores.del(vectorStoreId) -> VectorStoreDeleted -## Assistants +### Files Types: -- Assistant -- AssistantDeleted -- AssistantStreamEvent -- AssistantTool -- CodeInterpreterTool -- FunctionTool -- MessageStreamEvent -- RetrievalTool -- RunStepStreamEvent -- RunStreamEvent -- ThreadStreamEvent +- VectorStoreFile +- VectorStoreFileDeleted Methods: -- client.beta.assistants.create({ ...params }) -> Assistant -- client.beta.assistants.retrieve(assistantId) -> Assistant -- client.beta.assistants.update(assistantId, { ...params }) -> Assistant -- client.beta.assistants.list({ ...params }) -> AssistantsPage -- client.beta.assistants.del(assistantId) -> AssistantDeleted +- client.beta.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile +- client.beta.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile +- client.beta.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage +- client.beta.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted +- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) 
-> Promise<VectorStoreFile> -### Files +### FileBatches Types: -- AssistantFile -- FileDeleteResponse +- VectorStoreFileBatch Methods: -- client.beta.assistants.files.create(assistantId, { ...params }) -> AssistantFile -- client.beta.assistants.files.retrieve(assistantId, fileId) -> AssistantFile -- client.beta.assistants.files.list(assistantId, { ...params }) -> AssistantFilesPage -- client.beta.assistants.files.del(assistantId, fileId) -> FileDeleteResponse +- client.beta.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch +- client.beta.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.beta.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.beta.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage +- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) -> Promise<VectorStoreFileBatch> + +## Chat + +### Completions + +Methods: + +- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner +- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner +- client.beta.chat.completions.stream(body, options?) 
-> ChatCompletionStream + +## Assistants + +Types: + +- Assistant +- AssistantDeleted +- AssistantStreamEvent +- AssistantTool +- CodeInterpreterTool +- FileSearchTool +- FunctionTool +- MessageStreamEvent +- RunStepStreamEvent +- RunStreamEvent +- ThreadStreamEvent + +Methods: + +- client.beta.assistants.create({ ...params }) -> Assistant +- client.beta.assistants.retrieve(assistantId) -> Assistant +- client.beta.assistants.update(assistantId, { ...params }) -> Assistant +- client.beta.assistants.list({ ...params }) -> AssistantsPage +- client.beta.assistants.del(assistantId) -> AssistantDeleted ## Threads @@ -280,11 +315,11 @@ Types: - CodeInterpreterOutputImage - CodeInterpreterToolCall - CodeInterpreterToolCallDelta +- FileSearchToolCall +- FileSearchToolCallDelta - FunctionToolCall - FunctionToolCallDelta - MessageCreationStepDetails -- RetrievalToolCall -- RetrievalToolCallDelta - RunStep - RunStepDelta - RunStepDeltaEvent @@ -303,44 +338,33 @@ Methods: Types: -- Annotation -- AnnotationDelta -- FileCitationAnnotation -- FileCitationDeltaAnnotation -- FilePathAnnotation -- FilePathDeltaAnnotation -- ImageFile -- ImageFileContentBlock -- ImageFileDelta -- ImageFileDeltaBlock -- Message -- MessageContent -- MessageContentDelta -- MessageDeleted -- MessageDelta -- MessageDeltaEvent -- Text -- TextContentBlock -- TextDelta -- TextDeltaBlock - -Methods: - -- client.beta.threads.messages.create(threadId, { ...params }) -> Message -- client.beta.threads.messages.retrieve(threadId, messageId) -> Message -- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message -- client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage - -#### Files - -Types: - -- MessageFile +- Annotation +- AnnotationDelta +- FileCitationAnnotation +- FileCitationDeltaAnnotation +- FilePathAnnotation +- FilePathDeltaAnnotation +- ImageFile +- ImageFileContentBlock +- ImageFileDelta +- ImageFileDeltaBlock +- Message +- MessageContent +- 
MessageContentDelta +- MessageDeleted +- MessageDelta +- MessageDeltaEvent +- Text +- TextContentBlock +- TextDelta +- TextDeltaBlock Methods: -- client.beta.threads.messages.files.retrieve(threadId, messageId, fileId) -> MessageFile -- client.beta.threads.messages.files.list(threadId, messageId, { ...params }) -> MessageFilesPage +- client.beta.threads.messages.create(threadId, { ...params }) -> Message +- client.beta.threads.messages.retrieve(threadId, messageId) -> Message +- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message +- client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage # Batches diff --git a/helpers.md b/helpers.md index 7a34c3023..dda1ab26b 100644 --- a/helpers.md +++ b/helpers.md @@ -1,4 +1,4 @@ -# Streaming Helpers +# Helpers OpenAI supports streaming responses when interacting with the [Chat](#chat-streaming) or [Assistant](#assistant-streaming-api) APIs. @@ -449,3 +449,24 @@ See an example of a Next.JS integration here [`examples/stream-to-client-next.ts #### Proxy Streaming to a Browser See an example of using express to stream to a browser here [`examples/stream-to-client-express.ts`](examples/stream-to-client-express.ts). + +# Polling Helpers + +When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. +The SDK includes helper functions which will poll the status until it reaches a terminal state and then return the resulting object. +If an API method results in an action which could benefit from polling there will be a corresponding version of the +method ending in `_AndPoll`. + +All methods also allow you to set the polling frequency, how often the API is checked for an update, via a function argument (`pollIntervalMs`). + +The polling methods are: + +```ts +client.beta.threads.createAndRunPoll(...) +client.beta.threads.runs.createAndPoll((...) +client.beta.threads.runs.submitToolOutputsAndPoll((...) 
+client.beta.vectorStores.files.uploadAndPoll((...) +client.beta.vectorStores.files.createAndPoll((...) +client.beta.vectorStores.fileBatches.createAndPoll((...) +client.beta.vectorStores.fileBatches.uploadAndPoll((...) +``` diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index ece0ec65c..a2974826c 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -7,7 +7,7 @@ import { ImageFile, TextDelta, Messages, -} from 'openai/resources/beta/threads/messages/messages'; +} from 'openai/resources/beta/threads/messages'; import * as Core from 'openai/core'; import { RequestOptions } from 'openai/core'; import { @@ -30,7 +30,7 @@ import { MessageStreamEvent, RunStepStreamEvent, RunStreamEvent, -} from 'openai/resources/beta/assistants/assistants'; +} from 'openai/resources/beta/assistants'; import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps'; import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; import MessageDelta = Messages.MessageDelta; diff --git a/src/lib/Util.ts b/src/lib/Util.ts new file mode 100644 index 000000000..ae09b8a91 --- /dev/null +++ b/src/lib/Util.ts @@ -0,0 +1,23 @@ +/** + * Like `Promise.allSettled()` but throws an error if any promises are rejected. 
+ */ +export const allSettledWithThrow = async (promises: Promise[]): Promise => { + const results = await Promise.allSettled(promises); + const rejected = results.filter((result): result is PromiseRejectedResult => result.status === 'rejected'); + if (rejected.length) { + for (const result of rejected) { + console.error(result.reason); + } + + throw new Error(`${rejected.length} promise(s) failed - see the above errors`); + } + + // Note: TS was complaining about using `.filter().map()` here for some reason + const values: R[] = []; + for (const result of results) { + if (result.status === 'fulfilled') { + values.push(result.value); + } + } + return values; +}; diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants.ts similarity index 74% rename from src/resources/beta/assistants/assistants.ts rename to src/resources/beta/assistants.ts index fc9afe2ae..c0827848e 100644 --- a/src/resources/beta/assistants/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -3,18 +3,15 @@ import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; import { isRequestOptions } from 'openai/core'; -import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; +import * as AssistantsAPI from 'openai/resources/beta/assistants'; import * as Shared from 'openai/resources/shared'; -import * as FilesAPI from 'openai/resources/beta/assistants/files'; +import * as MessagesAPI from 'openai/resources/beta/threads/messages'; import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; -import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages'; import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; import { CursorPage, type CursorPageParams } from 'openai/pagination'; export class Assistants extends APIResource { - files: FilesAPI.Files = new FilesAPI.Files(this._client); - /** * Create an assistant with a 
model and instructions. */ @@ -22,7 +19,7 @@ export class Assistants extends APIResource { return this._client.post('/assistants', { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -32,7 +29,7 @@ export class Assistants extends APIResource { retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/assistants/${assistantId}`, { ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -47,7 +44,7 @@ export class Assistants extends APIResource { return this._client.post(`/assistants/${assistantId}`, { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -69,7 +66,7 @@ export class Assistants extends APIResource { return this._client.getAPIList('/assistants', AssistantsPage, { query, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -79,7 +76,7 @@ export class Assistants extends APIResource { del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/assistants/${assistantId}`, { ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } } @@ -105,13 +102,6 @@ export interface Assistant { */ description: string | null; - /** - * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs - * attached to this assistant. There can be a maximum of 20 files attached to the - * assistant. Files are ordered by their creation date in ascending order. - */ - file_ids: Array; - /** * The system instructions that the assistant uses. 
The maximum length is 256,000 * characters. @@ -147,9 +137,53 @@ export interface Assistant { /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per - * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + * assistant. Tools can be of types `code_interpreter`, `file_search`, or + * `function`. */ tools: Array; + + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + tool_resources?: Assistant.ToolResources | null; +} + +export namespace Assistant { + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter`` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The ID of the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. 
+ */ + vector_store_ids?: Array; + } + } } export interface AssistantDeleted { @@ -535,7 +569,7 @@ export namespace AssistantStreamEvent { } } -export type AssistantTool = CodeInterpreterTool | RetrievalTool | FunctionTool; +export type AssistantTool = CodeInterpreterTool | FileSearchTool | FunctionTool; export interface CodeInterpreterTool { /** @@ -544,6 +578,13 @@ export interface CodeInterpreterTool { type: 'code_interpreter'; } +export interface FileSearchTool { + /** + * The type of tool being defined: `file_search` + */ + type: 'file_search'; +} + export interface FunctionTool { function: Shared.FunctionDefinition; @@ -642,13 +683,6 @@ export namespace MessageStreamEvent { } } -export interface RetrievalTool { - /** - * The type of tool being defined: `retrieval` - */ - type: 'retrieval'; -} - /** * Occurs when a * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is @@ -956,13 +990,6 @@ export interface AssistantCreateParams { */ description?: string | null; - /** - * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs - * attached to this assistant. There can be a maximum of 20 files attached to the - * assistant. Files are ordered by their creation date in ascending order. - */ - file_ids?: Array; - /** * The system instructions that the assistant uses. The maximum length is 256,000 * characters. @@ -982,27 +1009,123 @@ export interface AssistantCreateParams { */ name?: string | null; + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. 
Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: ThreadsAPI.AssistantResponseFormatOption | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + tool_resources?: AssistantCreateParams.ToolResources | null; + /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per - * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + * assistant. Tools can be of types `code_interpreter`, `file_search`, or + * `function`. */ tools?: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + top_p?: number | null; } -export interface AssistantUpdateParams { +export namespace AssistantCreateParams { /** - * The description of the assistant. The maximum length is 512 characters. + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. 
For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. */ - description?: string | null; + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. + */ + vector_store_ids?: Array; + + /** + * A helper to create a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * with file_ids and attach it to this assistant. There can be a maximum of 1 + * vector store attached to the assistant. + */ + vector_stores?: Array; + } + export namespace FileSearch { + export interface VectorStore { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + * add to the vector store. There can be a maximum of 10000 files in a vector + * store. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to a vector store. This can be + * useful for storing additional information about the vector store in a structured + * format. Keys can be a maximum of 64 characters long and values can be a maxium + * of 512 characters long. + */ + metadata?: unknown; + } + } + } +} + +export interface AssistantUpdateParams { /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs - * attached to this assistant. There can be a maximum of 20 files attached to the - * assistant. 
Files are ordered by their creation date in ascending order. If a - * file was previously attached to the list but does not show up in the list, it - * will be deleted from the assistant. + * The description of the assistant. The maximum length is 512 characters. */ - file_ids?: Array; + description?: string | null; /** * The system instructions that the assistant uses. The maximum length is 256,000 @@ -1032,11 +1155,90 @@ export interface AssistantUpdateParams { */ name?: string | null; + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: ThreadsAPI.AssistantResponseFormatOption | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. 
+ */ + tool_resources?: AssistantUpdateParams.ToolResources | null; + /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per - * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + * assistant. Tools can be of types `code_interpreter`, `file_search`, or + * `function`. */ tools?: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + top_p?: number | null; +} + +export namespace AssistantUpdateParams { + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * Overrides the list of + * [file](https://platform.openai.com/docs/api-reference/files) IDs made available + * to the `code_interpreter` tool. There can be a maximum of 20 files associated + * with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * Overrides the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. 
+ */ + vector_store_ids?: Array; + } + } } export interface AssistantListParams extends CursorPageParams { @@ -1061,9 +1263,9 @@ export namespace Assistants { export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; export import AssistantTool = AssistantsAPI.AssistantTool; export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; + export import FileSearchTool = AssistantsAPI.FileSearchTool; export import FunctionTool = AssistantsAPI.FunctionTool; export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; - export import RetrievalTool = AssistantsAPI.RetrievalTool; export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; export import RunStreamEvent = AssistantsAPI.RunStreamEvent; export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; @@ -1071,10 +1273,4 @@ export namespace Assistants { export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams; export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; export import AssistantListParams = AssistantsAPI.AssistantListParams; - export import Files = FilesAPI.Files; - export import AssistantFile = FilesAPI.AssistantFile; - export import FileDeleteResponse = FilesAPI.FileDeleteResponse; - export import AssistantFilesPage = FilesAPI.AssistantFilesPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; } diff --git a/src/resources/beta/assistants/files.ts b/src/resources/beta/assistants/files.ts deleted file mode 100644 index 51fd0c0d8..000000000 --- a/src/resources/beta/assistants/files.ts +++ /dev/null @@ -1,154 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as FilesAPI from 'openai/resources/beta/assistants/files'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; - -export class Files extends APIResource { - /** - * Create an assistant file by attaching a - * [File](https://platform.openai.com/docs/api-reference/files) to an - * [assistant](https://platform.openai.com/docs/api-reference/assistants). - */ - create( - assistantId: string, - body: FileCreateParams, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post(`/assistants/${assistantId}/files`, { - body, - ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); - } - - /** - * Retrieves an AssistantFile. - */ - retrieve( - assistantId: string, - fileId: string, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.get(`/assistants/${assistantId}/files/${fileId}`, { - ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); - } - - /** - * Returns a list of assistant files. - */ - list( - assistantId: string, - query?: FileListParams, - options?: Core.RequestOptions, - ): Core.PagePromise; - list( - assistantId: string, - options?: Core.RequestOptions, - ): Core.PagePromise; - list( - assistantId: string, - query: FileListParams | Core.RequestOptions = {}, - options?: Core.RequestOptions, - ): Core.PagePromise { - if (isRequestOptions(query)) { - return this.list(assistantId, {}, query); - } - return this._client.getAPIList(`/assistants/${assistantId}/files`, AssistantFilesPage, { - query, - ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); - } - - /** - * Delete an assistant file. 
- */ - del( - assistantId: string, - fileId: string, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.delete(`/assistants/${assistantId}/files/${fileId}`, { - ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); - } -} - -export class AssistantFilesPage extends CursorPage {} - -/** - * A list of [Files](https://platform.openai.com/docs/api-reference/files) attached - * to an `assistant`. - */ -export interface AssistantFile { - /** - * The identifier, which can be referenced in API endpoints. - */ - id: string; - - /** - * The assistant ID that the file is attached to. - */ - assistant_id: string; - - /** - * The Unix timestamp (in seconds) for when the assistant file was created. - */ - created_at: number; - - /** - * The object type, which is always `assistant.file`. - */ - object: 'assistant.file'; -} - -/** - * Deletes the association between the assistant and the file, but does not delete - * the [File](https://platform.openai.com/docs/api-reference/files) object itself. - */ -export interface FileDeleteResponse { - id: string; - - deleted: boolean; - - object: 'assistant.file.deleted'; -} - -export interface FileCreateParams { - /** - * A [File](https://platform.openai.com/docs/api-reference/files) ID (with - * `purpose="assistants"`) that the assistant should use. Useful for tools like - * `retrieval` and `code_interpreter` that can access files. - */ - file_id: string; -} - -export interface FileListParams extends CursorPageParams { - /** - * A cursor for use in pagination. `before` is an object ID that defines your place - * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. - */ - before?: string; - - /** - * Sort order by the `created_at` timestamp of the objects. `asc` for ascending - * order and `desc` for descending order. 
- */ - order?: 'asc' | 'desc'; -} - -export namespace Files { - export import AssistantFile = FilesAPI.AssistantFile; - export import FileDeleteResponse = FilesAPI.FileDeleteResponse; - export import AssistantFilesPage = FilesAPI.AssistantFilesPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; -} diff --git a/src/resources/beta/assistants/index.ts b/src/resources/beta/assistants/index.ts deleted file mode 100644 index c191d338b..000000000 --- a/src/resources/beta/assistants/index.ts +++ /dev/null @@ -1,28 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export { - Assistant, - AssistantDeleted, - AssistantStreamEvent, - AssistantTool, - CodeInterpreterTool, - FunctionTool, - MessageStreamEvent, - RetrievalTool, - RunStepStreamEvent, - RunStreamEvent, - ThreadStreamEvent, - AssistantCreateParams, - AssistantUpdateParams, - AssistantListParams, - AssistantsPage, - Assistants, -} from './assistants'; -export { - AssistantFile, - FileDeleteResponse, - FileCreateParams, - FileListParams, - AssistantFilesPage, - Files, -} from './files'; diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 8f8148f9b..ff79d5242 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -1,17 +1,26 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from 'openai/resource'; -import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; +import * as AssistantsAPI from 'openai/resources/beta/assistants'; import * as ChatAPI from 'openai/resources/beta/chat/chat'; import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; +import * as VectorStoresAPI from 'openai/resources/beta/vector-stores/vector-stores'; export class Beta extends APIResource { + vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client); threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } export namespace Beta { + export import VectorStores = VectorStoresAPI.VectorStores; + export import VectorStore = VectorStoresAPI.VectorStore; + export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; + export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; + export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams; + export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams; + export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams; export import Chat = ChatAPI.Chat; export import Assistants = AssistantsAPI.Assistants; export import Assistant = AssistantsAPI.Assistant; @@ -19,9 +28,9 @@ export namespace Beta { export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; export import AssistantTool = AssistantsAPI.AssistantTool; export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; + export import FileSearchTool = AssistantsAPI.FileSearchTool; export import FunctionTool = AssistantsAPI.FunctionTool; export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; - export import RetrievalTool = AssistantsAPI.RetrievalTool; export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; export import 
RunStreamEvent = AssistantsAPI.RunStreamEvent; export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 54407edb3..029cd084c 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -6,9 +6,9 @@ export { AssistantStreamEvent, AssistantTool, CodeInterpreterTool, + FileSearchTool, FunctionTool, MessageStreamEvent, - RetrievalTool, RunStepStreamEvent, RunStreamEvent, ThreadStreamEvent, @@ -17,7 +17,7 @@ export { AssistantListParams, AssistantsPage, Assistants, -} from './assistants/index'; +} from './assistants'; export { AssistantResponseFormat, AssistantResponseFormatOption, @@ -37,3 +37,12 @@ export { } from './threads/index'; export { Beta } from './beta'; export { Chat } from './chat/index'; +export { + VectorStore, + VectorStoreDeleted, + VectorStoreCreateParams, + VectorStoreUpdateParams, + VectorStoreListParams, + VectorStoresPage, + VectorStores, +} from './vector-stores/index'; diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 5f41766a9..d0ebb1798 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -26,7 +26,7 @@ export { MessageListParams, MessagesPage, Messages, -} from './messages/index'; +} from './messages'; export { AssistantResponseFormat, AssistantResponseFormatOption, diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages.ts similarity index 89% rename from src/resources/beta/threads/messages/messages.ts rename to src/resources/beta/threads/messages.ts index 28026f3ff..f17b8508d 100644 --- a/src/resources/beta/threads/messages/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -3,13 +3,10 @@ import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; import { isRequestOptions } from 'openai/core'; -import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages'; -import 
* as FilesAPI from 'openai/resources/beta/threads/messages/files'; +import * as MessagesAPI from 'openai/resources/beta/threads/messages'; import { CursorPage, type CursorPageParams } from 'openai/pagination'; export class Messages extends APIResource { - files: FilesAPI.Files = new FilesAPI.Files(this._client); - /** * Create a message. */ @@ -21,7 +18,7 @@ export class Messages extends APIResource { return this._client.post(`/threads/${threadId}/messages`, { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -31,7 +28,7 @@ export class Messages extends APIResource { retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}/messages/${messageId}`, { ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -47,7 +44,7 @@ export class Messages extends APIResource { return this._client.post(`/threads/${threadId}/messages/${messageId}`, { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -71,7 +68,7 @@ export class Messages extends APIResource { return this._client.getAPIList(`/threads/${threadId}/messages`, MessagesPage, { query, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } } @@ -81,21 +78,21 @@ export class MessagesPage extends CursorPage {} /** * A citation within the message that points to a specific quote from a specific * File associated with the assistant or the message. Generated when the assistant - * uses the "retrieval" tool to search files. + * uses the "file_search" tool to search files. 
*/ export type Annotation = FileCitationAnnotation | FilePathAnnotation; /** * A citation within the message that points to a specific quote from a specific * File associated with the assistant or the message. Generated when the assistant - * uses the "retrieval" tool to search files. + * uses the "file_search" tool to search files. */ export type AnnotationDelta = FileCitationDeltaAnnotation | FilePathDeltaAnnotation; /** * A citation within the message that points to a specific quote from a specific * File associated with the assistant or the message. Generated when the assistant - * uses the "retrieval" tool to search files. + * uses the "file_search" tool to search files. */ export interface FileCitationAnnotation { end_index: number; @@ -132,7 +129,7 @@ export namespace FileCitationAnnotation { /** * A citation within the message that points to a specific quote from a specific * File associated with the assistant or the message. Generated when the assistant - * uses the "retrieval" tool to search files. + * uses the "file_search" tool to search files. */ export interface FileCitationDeltaAnnotation { /** @@ -302,6 +299,11 @@ export interface Message { */ assistant_id: string | null; + /** + * A list of files attached to the message, and the tools they were added to. + */ + attachments: Array | null; + /** * The Unix timestamp (in seconds) for when the message was completed. */ @@ -317,13 +319,6 @@ export interface Message { */ created_at: number; - /** - * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that - * the assistant should use. Useful for tools like retrieval and code_interpreter - * that can access files. A maximum of 10 files can be attached to a message. - */ - file_ids: Array; - /** * The Unix timestamp (in seconds) for when the message was marked as incomplete. 
*/ @@ -373,6 +368,15 @@ export interface Message { } export namespace Message { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } + /** * On an incomplete message, details about why the message is incomplete. */ @@ -413,13 +417,6 @@ export interface MessageDelta { */ content?: Array; - /** - * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that - * the assistant should use. Useful for tools like retrieval and code_interpreter - * that can access files. A maximum of 10 files can be attached to a message. - */ - file_ids?: Array; - /** * The entity that produced the message. One of `user` or `assistant`. */ @@ -511,12 +508,9 @@ export interface MessageCreateParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. There can be a maximum of 10 files attached to a - * message. Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -527,6 +521,17 @@ export interface MessageCreateParams { metadata?: unknown | null; } +export namespace MessageCreateParams { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } +} + export interface MessageUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. 
This can be useful @@ -583,8 +588,4 @@ export namespace Messages { export import MessageCreateParams = MessagesAPI.MessageCreateParams; export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; export import MessageListParams = MessagesAPI.MessageListParams; - export import Files = FilesAPI.Files; - export import MessageFile = FilesAPI.MessageFile; - export import MessageFilesPage = FilesAPI.MessageFilesPage; - export import FileListParams = FilesAPI.FileListParams; } diff --git a/src/resources/beta/threads/messages/files.ts b/src/resources/beta/threads/messages/files.ts deleted file mode 100644 index 994b09d5f..000000000 --- a/src/resources/beta/threads/messages/files.ts +++ /dev/null @@ -1,105 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as FilesAPI from 'openai/resources/beta/threads/messages/files'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; - -export class Files extends APIResource { - /** - * Retrieves a message file. - */ - retrieve( - threadId: string, - messageId: string, - fileId: string, - options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.get(`/threads/${threadId}/messages/${messageId}/files/${fileId}`, { - ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); - } - - /** - * Returns a list of message files. 
- */ - list( - threadId: string, - messageId: string, - query?: FileListParams, - options?: Core.RequestOptions, - ): Core.PagePromise; - list( - threadId: string, - messageId: string, - options?: Core.RequestOptions, - ): Core.PagePromise; - list( - threadId: string, - messageId: string, - query: FileListParams | Core.RequestOptions = {}, - options?: Core.RequestOptions, - ): Core.PagePromise { - if (isRequestOptions(query)) { - return this.list(threadId, messageId, {}, query); - } - return this._client.getAPIList(`/threads/${threadId}/messages/${messageId}/files`, MessageFilesPage, { - query, - ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, - }); - } -} - -export class MessageFilesPage extends CursorPage {} - -/** - * A list of files attached to a `message`. - */ -export interface MessageFile { - /** - * The identifier, which can be referenced in API endpoints. - */ - id: string; - - /** - * The Unix timestamp (in seconds) for when the message file was created. - */ - created_at: number; - - /** - * The ID of the [message](https://platform.openai.com/docs/api-reference/messages) - * that the [File](https://platform.openai.com/docs/api-reference/files) is - * attached to. - */ - message_id: string; - - /** - * The object type, which is always `thread.message.file`. - */ - object: 'thread.message.file'; -} - -export interface FileListParams extends CursorPageParams { - /** - * A cursor for use in pagination. `before` is an object ID that defines your place - * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. - */ - before?: string; - - /** - * Sort order by the `created_at` timestamp of the objects. `asc` for ascending - * order and `desc` for descending order. 
- */ - order?: 'asc' | 'desc'; -} - -export namespace Files { - export import MessageFile = FilesAPI.MessageFile; - export import MessageFilesPage = FilesAPI.MessageFilesPage; - export import FileListParams = FilesAPI.FileListParams; -} diff --git a/src/resources/beta/threads/messages/index.ts b/src/resources/beta/threads/messages/index.ts deleted file mode 100644 index ef446d012..000000000 --- a/src/resources/beta/threads/messages/index.ts +++ /dev/null @@ -1,30 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export { - Annotation, - AnnotationDelta, - FileCitationAnnotation, - FileCitationDeltaAnnotation, - FilePathAnnotation, - FilePathDeltaAnnotation, - ImageFile, - ImageFileContentBlock, - ImageFileDelta, - ImageFileDeltaBlock, - Message, - MessageContent, - MessageContentDelta, - MessageDeleted, - MessageDelta, - MessageDeltaEvent, - Text, - TextContentBlock, - TextDelta, - TextDeltaBlock, - MessageCreateParams, - MessageUpdateParams, - MessageListParams, - MessagesPage, - Messages, -} from './messages'; -export { MessageFile, FileListParams, MessageFilesPage, Files } from './files'; diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index c9b2d1ef5..d216195cb 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -5,11 +5,11 @@ export { CodeInterpreterOutputImage, CodeInterpreterToolCall, CodeInterpreterToolCallDelta, + FileSearchToolCall, + FileSearchToolCallDelta, FunctionToolCall, FunctionToolCallDelta, MessageCreationStepDetails, - RetrievalToolCall, - RetrievalToolCallDelta, RunStep, RunStepDelta, RunStepDeltaEvent, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 4cfa6c36e..9e42f8a20 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -8,7 +8,7 @@ import { AssistantStream, RunCreateParamsBaseStream } from 
'openai/lib/Assistant import { sleep } from 'openai/core'; import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream'; import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; -import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; +import * as AssistantsAPI from 'openai/resources/beta/assistants'; import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; import { CursorPage, type CursorPageParams } from 'openai/pagination'; @@ -39,7 +39,7 @@ export class Runs extends APIResource { return this._client.post(`/threads/${threadId}/runs`, { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, stream: body.stream ?? false, }) as APIPromise | APIPromise>; } @@ -50,7 +50,7 @@ export class Runs extends APIResource { retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}/runs/${runId}`, { ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -66,7 +66,7 @@ export class Runs extends APIResource { return this._client.post(`/threads/${threadId}/runs/${runId}`, { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -90,7 +90,7 @@ export class Runs extends APIResource { return this._client.getAPIList(`/threads/${threadId}/runs`, RunsPage, { query, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -100,7 +100,7 @@ export class Runs extends APIResource { cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { return 
this._client.post(`/threads/${threadId}/runs/${runId}/cancel`, { ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -224,7 +224,7 @@ export class Runs extends APIResource { return this._client.post(`/threads/${threadId}/runs/${runId}/submit_tool_outputs`, { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, stream: body.stream ?? false, }) as APIPromise | APIPromise>; } @@ -350,13 +350,6 @@ export interface Run { */ failed_at: number | null; - /** - * The list of [File](https://platform.openai.com/docs/api-reference/files) IDs the - * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for - * this run. - */ - file_ids: Array; - /** * Details on why the run is incomplete. Will be `null` if the run is not * incomplete. @@ -478,6 +471,11 @@ export interface Run { * The sampling temperature used for this run. If not set, defaults to 1. */ temperature?: number | null; + + /** + * The nucleus sampling value used for this run. If not set, defaults to 1. + */ + top_p?: number | null; } export namespace Run { @@ -720,6 +718,13 @@ export interface RunCreateParamsBase { */ tools?: Array | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + */ + top_p?: number | null; + truncation_strategy?: RunCreateParams.TruncationStrategy | null; } @@ -741,12 +746,9 @@ export namespace RunCreateParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. There can be a maximum of 10 files attached to a - * message. 
Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -757,6 +759,17 @@ export namespace RunCreateParams { metadata?: unknown | null; } + export namespace AdditionalMessage { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } + } + export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to @@ -943,6 +956,13 @@ export interface RunCreateAndPollParams { */ tools?: Array | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + */ + top_p?: number | null; + truncation_strategy?: RunCreateAndPollParams.TruncationStrategy | null; } @@ -964,12 +984,9 @@ export namespace RunCreateAndPollParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. There can be a maximum of 10 files attached to a - * message. Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. 
This can be useful @@ -980,6 +997,17 @@ export namespace RunCreateAndPollParams { metadata?: unknown | null; } + export namespace AdditionalMessage { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } + } + export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to @@ -1119,6 +1147,13 @@ export interface RunCreateAndStreamParams { */ tools?: Array | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + */ + top_p?: number | null; + truncation_strategy?: RunCreateAndStreamParams.TruncationStrategy | null; } @@ -1140,12 +1175,9 @@ export namespace RunCreateAndStreamParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. There can be a maximum of 10 files attached to a - * message. Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1156,6 +1188,17 @@ export namespace RunCreateAndStreamParams { metadata?: unknown | null; } + export namespace AdditionalMessage { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } + } + export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. 
If set to @@ -1295,6 +1338,13 @@ export interface RunStreamParams { */ tools?: Array | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + */ + top_p?: number | null; + truncation_strategy?: RunStreamParams.TruncationStrategy | null; } @@ -1316,12 +1366,9 @@ export namespace RunStreamParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. There can be a maximum of 10 files attached to a - * message. Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1332,6 +1379,17 @@ export namespace RunStreamParams { metadata?: unknown | null; } + export namespace AdditionalMessage { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } + } + export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. 
If set to @@ -1470,11 +1528,11 @@ export namespace Runs { export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; + export import FileSearchToolCall = StepsAPI.FileSearchToolCall; + export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta; export import FunctionToolCall = StepsAPI.FunctionToolCall; export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; - export import RetrievalToolCall = StepsAPI.RetrievalToolCall; - export import RetrievalToolCallDelta = StepsAPI.RetrievalToolCallDelta; export import RunStep = StepsAPI.RunStep; export import RunStepDelta = StepsAPI.RunStepDelta; export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index f0816fdb2..203741f4b 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -18,7 +18,7 @@ export class Steps extends APIResource { ): Core.APIPromise { return this._client.get(`/threads/${threadId}/runs/${runId}/steps/${stepId}`, { ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -48,7 +48,7 @@ export class Steps extends APIResource { return this._client.getAPIList(`/threads/${threadId}/runs/${runId}/steps`, RunStepsPage, { query, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } } @@ -220,6 +220,47 @@ export namespace CodeInterpreterToolCallDelta { } } +export interface FileSearchToolCall { + /** + * The ID of the tool call object. 
+ */ + id: string; + + /** + * For now, this is always going to be an empty object. + */ + file_search: unknown; + + /** + * The type of tool call. This is always going to be `file_search` for this type of + * tool call. + */ + type: 'file_search'; +} + +export interface FileSearchToolCallDelta { + /** + * For now, this is always going to be an empty object. + */ + file_search: unknown; + + /** + * The index of the tool call in the tool calls array. + */ + index: number; + + /** + * The type of tool call. This is always going to be `file_search` for this type of + * tool call. + */ + type: 'file_search'; + + /** + * The ID of the tool call object. + */ + id?: string; +} + export interface FunctionToolCall { /** * The ID of the tool call object. @@ -330,47 +371,6 @@ export namespace MessageCreationStepDetails { } } -export interface RetrievalToolCall { - /** - * The ID of the tool call object. - */ - id: string; - - /** - * For now, this is always going to be an empty object. - */ - retrieval: unknown; - - /** - * The type of tool call. This is always going to be `retrieval` for this type of - * tool call. - */ - type: 'retrieval'; -} - -export interface RetrievalToolCallDelta { - /** - * The index of the tool call in the tool calls array. - */ - index: number; - - /** - * The type of tool call. This is always going to be `retrieval` for this type of - * tool call. - */ - type: 'retrieval'; - - /** - * The ID of the tool call object. - */ - id?: string; - - /** - * For now, this is always going to be an empty object. - */ - retrieval?: unknown; -} - /** * Represents a step in execution of a run. */ @@ -561,12 +561,12 @@ export namespace RunStepDeltaMessageDelta { /** * Details of the Code Interpreter tool call the run step was involved in. 
*/ -export type ToolCall = CodeInterpreterToolCall | RetrievalToolCall | FunctionToolCall; +export type ToolCall = CodeInterpreterToolCall | FileSearchToolCall | FunctionToolCall; /** * Details of the Code Interpreter tool call the run step was involved in. */ -export type ToolCallDelta = CodeInterpreterToolCallDelta | RetrievalToolCallDelta | FunctionToolCallDelta; +export type ToolCallDelta = CodeInterpreterToolCallDelta | FileSearchToolCallDelta | FunctionToolCallDelta; /** * Details of the tool call. @@ -579,7 +579,7 @@ export interface ToolCallDeltaObject { /** * An array of tool calls the run step was involved in. These can be associated - * with one of three types of tools: `code_interpreter`, `retrieval`, or + * with one of three types of tools: `code_interpreter`, `file_search`, or * `function`. */ tool_calls?: Array; @@ -591,7 +591,7 @@ export interface ToolCallDeltaObject { export interface ToolCallsStepDetails { /** * An array of tool calls the run step was involved in. These can be associated - * with one of three types of tools: `code_interpreter`, `retrieval`, or + * with one of three types of tools: `code_interpreter`, `file_search`, or * `function`. 
*/ tool_calls: Array; @@ -623,11 +623,11 @@ export namespace Steps { export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; + export import FileSearchToolCall = StepsAPI.FileSearchToolCall; + export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta; export import FunctionToolCall = StepsAPI.FunctionToolCall; export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; - export import RetrievalToolCall = StepsAPI.RetrievalToolCall; - export import RetrievalToolCallDelta = StepsAPI.RetrievalToolCallDelta; export import RunStep = StepsAPI.RunStep; export import RunStepDelta = StepsAPI.RunStepDelta; export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 29682c308..f3590ed80 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -6,8 +6,8 @@ import { APIResource } from 'openai/resource'; import { isRequestOptions } from 'openai/core'; import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from 'openai/lib/AssistantStream'; import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; -import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants'; -import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages'; +import * as AssistantsAPI from 'openai/resources/beta/assistants'; +import * as MessagesAPI from 'openai/resources/beta/threads/messages'; import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; import { Stream } from 'openai/streaming'; @@ -30,7 +30,7 @@ export class Threads extends APIResource { return this._client.post('/threads', { body, ...options, - headers: { 
'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -40,7 +40,7 @@ export class Threads extends APIResource { retrieve(threadId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}`, { ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -51,7 +51,7 @@ export class Threads extends APIResource { return this._client.post(`/threads/${threadId}`, { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -61,7 +61,7 @@ export class Threads extends APIResource { del(threadId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/threads/${threadId}`, { ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } @@ -87,7 +87,7 @@ export class Threads extends APIResource { return this._client.post('/threads/runs', { body, ...options, - headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers }, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, stream: body.stream ?? false, }) as APIPromise | APIPromise>; } @@ -154,7 +154,7 @@ export interface AssistantToolChoice { /** * The type of the tool. If type is `function`, the function name must be set */ - type: 'function' | 'code_interpreter' | 'retrieval'; + type: 'function' | 'code_interpreter' | 'file_search'; function?: AssistantToolChoiceFunction; } @@ -203,6 +203,49 @@ export interface Thread { * The object type, which is always `thread`. */ object: 'thread'; + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. 
For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + tool_resources: Thread.ToolResources | null; +} + +export namespace Thread { + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to + * the thread. + */ + vector_store_ids?: Array; + } + } } export interface ThreadDeleted { @@ -227,6 +270,14 @@ export interface ThreadCreateParams { * characters long. */ metadata?: unknown | null; + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + tool_resources?: ThreadCreateParams.ToolResources | null; } export namespace ThreadCreateParams { @@ -247,12 +298,9 @@ export namespace ThreadCreateParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. 
There can be a maximum of 10 files attached to a - * message. Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -262,6 +310,77 @@ export namespace ThreadCreateParams { */ metadata?: unknown | null; } + + export namespace Message { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } + } + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to + * the thread. + */ + vector_store_ids?: Array; + + /** + * A helper to create a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * with file_ids and attach it to this thread. There can be a maximum of 1 vector + * store attached to the thread. 
+ */ + vector_stores?: Array; + } + + export namespace FileSearch { + export interface VectorStore { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + * add to the vector store. There can be a maximum of 10000 files in a vector + * store. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to a vector store. This can be + * useful for storing additional information about the vector store in a structured + * format. Keys can be a maximum of 64 characters long and values can be a maxium + * of 512 characters long. + */ + metadata?: unknown; + } + } + } } export interface ThreadUpdateParams { @@ -272,6 +391,49 @@ export interface ThreadUpdateParams { * characters long. */ metadata?: unknown | null; + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + tool_resources?: ThreadUpdateParams.ToolResources | null; +} + +export namespace ThreadUpdateParams { + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. 
+ */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to + * the thread. + */ + vector_store_ids?: Array; + } + } } export type ThreadCreateAndRunParams = @@ -296,7 +458,7 @@ export interface ThreadCreateAndRunParamsBase { * The maximum number of completion tokens that may be used over the course of the * run. The run will make a best effort to use only the number of completion tokens * specified, across multiple turns of the run. If the run exceeds the number of - * completion tokens specified, the run will end with status `complete`. See + * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_completion_tokens?: number | null; @@ -305,7 +467,7 @@ export interface ThreadCreateAndRunParamsBase { * The maximum number of prompt tokens that may be used over the course of the run. * The run will make a best effort to use only the number of prompt tokens * specified, across multiple turns of the run. If the run exceeds the number of - * prompt tokens specified, the run will end with status `complete`. See + * prompt tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_prompt_tokens?: number | null; @@ -393,14 +555,29 @@ export interface ThreadCreateAndRunParamsBase { */ tool_choice?: AssistantToolChoiceOption | null; + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + tool_resources?: ThreadCreateAndRunParams.ToolResources | null; + /** * Override the tools the assistant can use for this run. 
This is useful for * modifying the behavior on a per-run basis. */ tools?: Array< - AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool + AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool > | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + */ + top_p?: number | null; + truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null; } @@ -422,6 +599,14 @@ export namespace ThreadCreateAndRunParams { * characters long. */ metadata?: unknown | null; + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + tool_resources?: Thread.ToolResources | null; } export namespace Thread { @@ -442,12 +627,9 @@ export namespace ThreadCreateAndRunParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. There can be a maximum of 10 files attached to a - * message. Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -457,6 +639,110 @@ export namespace ThreadCreateAndRunParams { */ metadata?: unknown | null; } + + export namespace Message { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. 
+ */ + file_id?: string; + } + } + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to + * the thread. + */ + vector_store_ids?: Array; + + /** + * A helper to create a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * with file_ids and attach it to this thread. There can be a maximum of 1 vector + * store attached to the thread. + */ + vector_stores?: Array; + } + + export namespace FileSearch { + export interface VectorStore { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + * add to the vector store. There can be a maximum of 10000 files in a vector + * store. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to a vector store. This can be + * useful for storing additional information about the vector store in a structured + * format. Keys can be a maximum of 64 characters long and values can be a maxium + * of 512 characters long. + */ + metadata?: unknown; + } + } + } + } + + /** + * A set of resources that are used by the assistant's tools. 
The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The ID of the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. + */ + vector_store_ids?: Array; + } } export interface TruncationStrategy { @@ -515,7 +801,7 @@ export interface ThreadCreateAndRunPollParams { * The maximum number of completion tokens that may be used over the course of the * run. The run will make a best effort to use only the number of completion tokens * specified, across multiple turns of the run. If the run exceeds the number of - * completion tokens specified, the run will end with status `complete`. See + * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_completion_tokens?: number | null; @@ -524,7 +810,7 @@ export interface ThreadCreateAndRunPollParams { * The maximum number of prompt tokens that may be used over the course of the run. * The run will make a best effort to use only the number of prompt tokens * specified, across multiple turns of the run. If the run exceeds the number of - * prompt tokens specified, the run will end with status `complete`. See + * prompt tokens specified, the run will end with status `incomplete`. 
See * `incomplete_details` for more info. */ max_prompt_tokens?: number | null; @@ -605,14 +891,29 @@ export interface ThreadCreateAndRunPollParams { */ tool_choice?: AssistantToolChoiceOption | null; + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + tool_resources?: ThreadCreateAndRunPollParams.ToolResources | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. */ tools?: Array< - AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool + AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool > | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + */ + top_p?: number | null; + truncation_strategy?: ThreadCreateAndRunPollParams.TruncationStrategy | null; } @@ -634,6 +935,14 @@ export namespace ThreadCreateAndRunPollParams { * characters long. */ metadata?: unknown | null; + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + tool_resources?: Thread.ToolResources | null; } export namespace Thread { @@ -654,12 +963,9 @@ export namespace ThreadCreateAndRunPollParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. 
There can be a maximum of 10 files attached to a - * message. Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -669,6 +975,110 @@ export namespace ThreadCreateAndRunPollParams { */ metadata?: unknown | null; } + + export namespace Message { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } + } + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to + * the thread. + */ + vector_store_ids?: Array; + + /** + * A helper to create a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * with file_ids and attach it to this thread. There can be a maximum of 1 vector + * store attached to the thread. 
+ */ + vector_stores?: Array; + } + + export namespace FileSearch { + export interface VectorStore { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + * add to the vector store. There can be a maximum of 10000 files in a vector + * store. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to a vector store. This can be + * useful for storing additional information about the vector store in a structured + * format. Keys can be a maximum of 64 characters long and values can be a maxium + * of 512 characters long. + */ + metadata?: unknown; + } + } + } + } + + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The ID of the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. + */ + vector_store_ids?: Array; + } } export interface TruncationStrategy { @@ -706,7 +1116,7 @@ export interface ThreadCreateAndRunStreamParams { * The maximum number of completion tokens that may be used over the course of the * run. The run will make a best effort to use only the number of completion tokens * specified, across multiple turns of the run. 
If the run exceeds the number of - * completion tokens specified, the run will end with status `complete`. See + * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_completion_tokens?: number | null; @@ -715,7 +1125,7 @@ export interface ThreadCreateAndRunStreamParams { * The maximum number of prompt tokens that may be used over the course of the run. * The run will make a best effort to use only the number of prompt tokens * specified, across multiple turns of the run. If the run exceeds the number of - * prompt tokens specified, the run will end with status `complete`. See + * prompt tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_prompt_tokens?: number | null; @@ -796,14 +1206,29 @@ export interface ThreadCreateAndRunStreamParams { */ tool_choice?: AssistantToolChoiceOption | null; + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. + */ + tool_resources?: ThreadCreateAndRunStreamParams.ToolResources | null; + /** * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. */ tools?: Array< - AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool + AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool > | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. 
+ */ + top_p?: number | null; + truncation_strategy?: ThreadCreateAndRunStreamParams.TruncationStrategy | null; } @@ -825,6 +1250,14 @@ export namespace ThreadCreateAndRunStreamParams { * characters long. */ metadata?: unknown | null; + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ + tool_resources?: Thread.ToolResources | null; } export namespace Thread { @@ -845,12 +1278,9 @@ export namespace ThreadCreateAndRunStreamParams { role: 'user' | 'assistant'; /** - * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that - * the message should use. There can be a maximum of 10 files attached to a - * message. Useful for tools like `retrieval` and `code_interpreter` that can - * access and use files. + * A list of files attached to the message, and the tools they should be added to. */ - file_ids?: Array; + attachments?: Array | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -860,6 +1290,110 @@ export namespace ThreadCreateAndRunStreamParams { */ metadata?: unknown | null; } + + export namespace Message { + export interface Attachment { + add_to?: Array<'file_search' | 'code_interpreter'>; + + /** + * The ID of the file to attach to the message. + */ + file_id?: string; + } + } + + /** + * A set of resources that are made available to the assistant's tools in this + * thread. The resources are specific to the type of tool. For example, the + * `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. 
+ */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to + * the thread. + */ + vector_store_ids?: Array; + + /** + * A helper to create a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * with file_ids and attach it to this thread. There can be a maximum of 1 vector + * store attached to the thread. + */ + vector_stores?: Array; + } + + export namespace FileSearch { + export interface VectorStore { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + * add to the vector store. There can be a maximum of 10000 files in a vector + * store. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to a vector store. This can be + * useful for storing additional information about the vector store in a structured + * format. Keys can be a maximum of 64 characters long and values can be a maxium + * of 512 characters long. + */ + metadata?: unknown; + } + } + } + } + + /** + * A set of resources that are used by the assistant's tools. The resources are + * specific to the type of tool. For example, the `code_interpreter` tool requires + * a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. 
+ */ + export interface ToolResources { + code_interpreter?: ToolResources.CodeInterpreter; + + file_search?: ToolResources.FileSearch; + } + + export namespace ToolResources { + export interface CodeInterpreter { + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + file_ids?: Array; + } + + export interface FileSearch { + /** + * The ID of the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. + */ + vector_store_ids?: Array; + } } export interface TruncationStrategy { diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts new file mode 100644 index 000000000..3ccdd0108 --- /dev/null +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -0,0 +1,292 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import * as Core from 'openai/core'; +import { APIResource } from 'openai/resource'; +import { isRequestOptions } from 'openai/core'; +import { sleep } from 'openai/core'; +import { Uploadable } from 'openai/core'; +import { allSettledWithThrow } from 'openai/lib/Util'; +import * as FileBatchesAPI from 'openai/resources/beta/vector-stores/file-batches'; +import * as FilesAPI from 'openai/resources/beta/vector-stores/files'; +import { VectorStoreFilesPage } from 'openai/resources/beta/vector-stores/files'; +import { type CursorPageParams } from 'openai/pagination'; + +export class FileBatches extends APIResource { + /** + * Create a vector store file batch. 
+ */ + create( + vectorStoreId: string, + body: FileBatchCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/vector_stores/${vectorStoreId}/file_batches`, { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Retrieves a vector store file batch. + */ + retrieve( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/vector_stores/${vectorStoreId}/file_batches/${batchId}`, { + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Cancel a vector store file batch. This attempts to cancel the processing of + * files in this batch as soon as possible. + */ + cancel( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/vector_stores/${vectorStoreId}/file_batches/${batchId}/cancel`, { + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Create a vector store batch and poll until all files have been processed. + */ + async createAndPoll( + vectorStoreId: string, + body: FileBatchCreateParams, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const batch = await this.create(vectorStoreId, body); + return await this.poll(vectorStoreId, batch.id, options); + } + + /** + * Returns a list of vector store files in a batch. 
+ */ + listFiles( + vectorStoreId: string, + batchId: string, + query?: FileBatchListFilesParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + listFiles( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + listFiles( + vectorStoreId: string, + batchId: string, + query: FileBatchListFilesParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.listFiles(vectorStoreId, batchId, {}, query); + } + return this._client.getAPIList( + `/vector_stores/${vectorStoreId}/file_batches/${batchId}/files`, + VectorStoreFilesPage, + { query, ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers } }, + ); + } + + /** + * Wait for the given file batch to be processed. + * + * Note: this will return even if one of the files failed to process, you need to + * check batch.file_counts.failed_count to handle this case. + */ + async poll( + vectorStoreId: string, + batchId: string, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' }; + if (options?.pollIntervalMs) { + headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString(); + } + + while (true) { + const { data: batch, response } = await this.retrieve(vectorStoreId, batchId, { + ...options, + headers, + }).withResponse(); + + switch (batch.status) { + case 'in_progress': + let sleepInterval = 5000; + + if (options?.pollIntervalMs) { + sleepInterval = options.pollIntervalMs; + } else { + const headerInterval = response.headers.get('openai-poll-after-ms'); + if (headerInterval) { + const headerIntervalMs = parseInt(headerInterval); + if (!isNaN(headerIntervalMs)) { + sleepInterval = headerIntervalMs; + } + } + } + await sleep(sleepInterval); + break; + case 'failed': + case 'completed': + return batch; + } + } + } + + /** + * 
Uploads the given files concurrently and then creates a vector store file batch. + * + * The concurrency limit is configurable using the `maxConcurrency` parameter. + */ + async uploadAndPoll( + vectorStoreId: string, + { files, fileIds = [] }: { files: Uploadable[]; fileIds?: string[] }, + options?: Core.RequestOptions & { pollIntervalMs?: number; maxConcurrency?: number }, + ): Promise { + if (files === null || files.length == 0) { + throw new Error('No files provided to process.'); + } + + const configuredConcurrency = options?.maxConcurrency ?? 5; + //We cap the number of workers at the number of files (so we don't start any unnecessary workers) + const concurrencyLimit = Math.min(configuredConcurrency, files.length); + + const client = this._client; + const fileIterator = files.values(); + const allFileIds: string[] = [...fileIds]; + + //This code is based on this design. The libraries don't accommodate our environment limits. + // https://stackoverflow.com/questions/40639432/what-is-the-best-way-to-limit-concurrency-when-using-es6s-promise-all + async function processFiles(iterator: IterableIterator) { + for (let item of iterator) { + const fileObj = await client.files.create({ file: item, purpose: 'assistants' }, options); + allFileIds.push(fileObj.id); + } + } + + //Start workers to process results + const workers = Array(concurrencyLimit).fill(fileIterator).map(processFiles); + + //Wait for all processing to complete. + await allSettledWithThrow(workers); + + return await this.createAndPoll(vectorStoreId, { + file_ids: allFileIds, + }); + } +} + +/** + * A batch of files attached to a vector store. + */ +export interface VectorStoreFileBatch { + /** + * The identifier, which can be referenced in API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the vector store files batch was + * created. 
+ */ + created_at: number; + + file_counts: VectorStoreFileBatch.FileCounts; + + /** + * The object type, which is always `vector_store.file_batch`. + */ + object: 'vector_store.files_batch'; + + /** + * The status of the vector store files batch, which can be either `in_progress`, + * `completed`, `cancelled` or `failed`. + */ + status: 'in_progress' | 'completed' | 'cancelled' | 'failed'; + + /** + * The ID of the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * that the [File](https://platform.openai.com/docs/api-reference/files) is + * attached to. + */ + vector_store_id: string; +} + +export namespace VectorStoreFileBatch { + export interface FileCounts { + /** + * The number of files that where cancelled. + */ + cancelled: number; + + /** + * The number of files that have been processed. + */ + completed: number; + + /** + * The number of files that have failed to process. + */ + failed: number; + + /** + * The number of files that are currently being processed. + */ + in_progress: number; + + /** + * The total number of files. + */ + total: number; + } +} + +export interface FileBatchCreateParams { + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + * the vector store should use. Useful for tools like `file_search` that can access + * files. + */ + file_ids: Array; +} + +export interface FileBatchListFilesParams extends CursorPageParams { + /** + * A cursor for use in pagination. `before` is an object ID that defines your place + * in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to + * fetch the previous page of the list. + */ + before?: string; + + /** + * Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. 
+ */ + filter?: 'in_progress' | 'completed' | 'failed' | 'cancelled'; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: 'asc' | 'desc'; +} + +export namespace FileBatches { + export import VectorStoreFileBatch = FileBatchesAPI.VectorStoreFileBatch; + export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams; + export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams; +} + +export { VectorStoreFilesPage }; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts new file mode 100644 index 000000000..40b97e9a9 --- /dev/null +++ b/src/resources/beta/vector-stores/files.ts @@ -0,0 +1,277 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import * as Core from 'openai/core'; +import { APIResource } from 'openai/resource'; +import { isRequestOptions } from 'openai/core'; +import { sleep, Uploadable } from 'openai/core'; +import * as FilesAPI from 'openai/resources/beta/vector-stores/files'; +import { CursorPage, type CursorPageParams } from 'openai/pagination'; + +export class Files extends APIResource { + /** + * Create a vector store file by attaching a + * [File](https://platform.openai.com/docs/api-reference/files) to a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). + */ + create( + vectorStoreId: string, + body: FileCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/vector_stores/${vectorStoreId}/files`, { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Retrieves a vector store file. 
+ */ + retrieve( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/vector_stores/${vectorStoreId}/files/${fileId}`, { + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Returns a list of vector store files. + */ + list( + vectorStoreId: string, + query?: FileListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + vectorStoreId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + vectorStoreId: string, + query: FileListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(vectorStoreId, {}, query); + } + return this._client.getAPIList(`/vector_stores/${vectorStoreId}/files`, VectorStoreFilesPage, { + query, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Delete a vector store file. This will remove the file from the vector store but + * the file itself will not be deleted. To delete the file, use the + * [delete file](https://platform.openai.com/docs/api-reference/files/delete) + * endpoint. + */ + del( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.delete(`/vector_stores/${vectorStoreId}/files/${fileId}`, { + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Attach a file to the given vector store and wait for it to be processed. + */ + async createAndPoll( + vectorStoreId: string, + body: FileCreateParams, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const file = await this.create(vectorStoreId, body, options); + return await this.poll(vectorStoreId, file.id, options); + } + + /** + * Wait for the vector store file to finish processing. 
+ * + * Note: this will return even if the file failed to process, you need to check + * file.last_error and file.status to handle these cases + */ + async poll( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' }; + if (options?.pollIntervalMs) { + headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString(); + } + while (true) { + const fileResponse = await this.retrieve(vectorStoreId, fileId, { + ...options, + headers, + }).withResponse(); + + const file = fileResponse.data; + + switch (file.status) { + case 'in_progress': + let sleepInterval = 5000; + + if (options?.pollIntervalMs) { + sleepInterval = options.pollIntervalMs; + } else { + const headerInterval = fileResponse.response.headers.get('openai-poll-after-ms'); + if (headerInterval) { + const headerIntervalMs = parseInt(headerInterval); + if (!isNaN(headerIntervalMs)) { + sleepInterval = headerIntervalMs; + } + } + } + await sleep(sleepInterval); + break; + case 'failed': + case 'completed': + return file; + } + } + } + + /** + * Upload a file to the `files` API and then attach it to the given vector store. + * Note the file will be asynchronously processed (you can use the alternative + * polling helper method to wait for processing to complete). + */ + async upload( + vectorStoreId: string, + file: Uploadable, + options?: Core.RequestOptions, + ): Promise { + const fileInfo = await this._client.files.create({ file: file, purpose: 'assistants' }, options); + return this.create(vectorStoreId, { file_id: fileInfo.id }, options); + } + + /** + * Add a file to a vector store and poll until processing is complete. 
+ */ + async uploadAndPoll( + vectorStoreId: string, + file: Uploadable, + options?: Core.RequestOptions & { pollIntervalMs?: number }, + ): Promise { + const fileInfo = await this._client.files.create({ file: file, purpose: 'assistants' }, options); + return await this.poll(vectorStoreId, fileInfo.id, options); + } +} + +export class VectorStoreFilesPage extends CursorPage {} + +/** + * A list of files attached to a vector store. + */ +export interface VectorStoreFile { + /** + * The identifier, which can be referenced in API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the vector store file was created. + */ + created_at: number; + + /** + * The last error associated with this vector store file. Will be `null` if there + * are no errors. + */ + last_error: VectorStoreFile.LastError | null; + + /** + * The object type, which is always `vector_store.file`. + */ + object: 'vector_store.file'; + + /** + * The status of the vector store file, which can be either `in_progress`, + * `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + * vector store file is ready for use. + */ + status: 'in_progress' | 'completed' | 'cancelled' | 'failed'; + + /** + * The ID of the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * that the [File](https://platform.openai.com/docs/api-reference/files) is + * attached to. + */ + vector_store_id: string; +} + +export namespace VectorStoreFile { + /** + * The last error associated with this vector store file. Will be `null` if there + * are no errors. + */ + export interface LastError { + /** + * One of `server_error` or `rate_limit_exceeded`. + */ + code: 'internal_error' | 'file_not_found' | 'parsing_error' | 'unhandled_mime_type'; + + /** + * A human-readable description of the error. 
+ */ + message: string; + } +} + +export interface VectorStoreFileDeleted { + id: string; + + deleted: boolean; + + object: 'vector_store.file.deleted'; +} + +export interface FileCreateParams { + /** + * A [File](https://platform.openai.com/docs/api-reference/files) ID that the + * vector store should use. Useful for tools like `file_search` that can access + * files. + */ + file_id: string; +} + +export interface FileListParams extends CursorPageParams { + /** + * A cursor for use in pagination. `before` is an object ID that defines your place + * in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to + * fetch the previous page of the list. + */ + before?: string; + + /** + * Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. + */ + filter?: 'in_progress' | 'completed' | 'failed' | 'cancelled'; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: 'asc' | 'desc'; +} + +export namespace Files { + export import VectorStoreFile = FilesAPI.VectorStoreFile; + export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted; + export import VectorStoreFilesPage = FilesAPI.VectorStoreFilesPage; + export import FileCreateParams = FilesAPI.FileCreateParams; + export import FileListParams = FilesAPI.FileListParams; +} diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/beta/vector-stores/index.ts new file mode 100644 index 000000000..8fb787ccd --- /dev/null +++ b/src/resources/beta/vector-stores/index.ts @@ -0,0 +1,25 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + VectorStore, + VectorStoreDeleted, + VectorStoreCreateParams, + VectorStoreUpdateParams, + VectorStoreListParams, + VectorStoresPage, + VectorStores, +} from './vector-stores'; +export { + VectorStoreFile, + VectorStoreFileDeleted, + FileCreateParams, + FileListParams, + VectorStoreFilesPage, + Files, +} from './files'; +export { + VectorStoreFileBatch, + FileBatchCreateParams, + FileBatchListFilesParams, + FileBatches, +} from './file-batches'; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts new file mode 100644 index 000000000..892d06aa4 --- /dev/null +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -0,0 +1,318 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import * as Core from 'openai/core'; +import { APIResource } from 'openai/resource'; +import { isRequestOptions } from 'openai/core'; +import * as VectorStoresAPI from 'openai/resources/beta/vector-stores/vector-stores'; +import * as FileBatchesAPI from 'openai/resources/beta/vector-stores/file-batches'; +import * as FilesAPI from 'openai/resources/beta/vector-stores/files'; +import { CursorPage, type CursorPageParams } from 'openai/pagination'; + +export class VectorStores extends APIResource { + files: FilesAPI.Files = new FilesAPI.Files(this._client); + fileBatches: FileBatchesAPI.FileBatches = new FileBatchesAPI.FileBatches(this._client); + + /** + * Create a vector store. + */ + create(body: VectorStoreCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/vector_stores', { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Retrieves a vector store. 
+ */ + retrieve(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/vector_stores/${vectorStoreId}`, { + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Modifies a vector store. + */ + update( + vectorStoreId: string, + body: VectorStoreUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/vector_stores/${vectorStoreId}`, { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Returns a list of vector stores. + */ + list( + query?: VectorStoreListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list(options?: Core.RequestOptions): Core.PagePromise; + list( + query: VectorStoreListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.getAPIList('/vector_stores', VectorStoresPage, { + query, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + + /** + * Delete a vector store. + */ + del(vectorStoreId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/vector_stores/${vectorStoreId}`, { + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } +} + +export class VectorStoresPage extends CursorPage {} + +/** + * A vector store is a collection of processed files can be used by the + * `file_search` tool. + */ +export interface VectorStore { + /** + * The identifier, which can be referenced in API endpoints. + */ + id: string; + + /** + * The byte size of the vector store. + */ + bytes: number; + + /** + * The Unix timestamp (in seconds) for when the vector store was created. 
+ */ + created_at: number; + + file_counts: VectorStore.FileCounts; + + /** + * The Unix timestamp (in seconds) for when the vector store was last active. + */ + last_active_at: number | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata: unknown | null; + + /** + * The name of the vector store. + */ + name: string; + + /** + * The object type, which is always `vector_store`. + */ + object: 'vector_store'; + + /** + * The status of the vector store, which can be either `expired`, `in_progress`, or + * `completed`. A status of `completed` indicates that the vector store is ready + * for use. + */ + status: 'expired' | 'in_progress' | 'completed'; + + /** + * The expiration policy for a vector store. + */ + expires_after?: VectorStore.ExpiresAfter; + + /** + * The Unix timestamp (in seconds) for when the vector store will expire. + */ + expires_at?: number | null; +} + +export namespace VectorStore { + export interface FileCounts { + /** + * The number of files that were cancelled. + */ + cancelled: number; + + /** + * The number of files that have been successfully processed. + */ + completed: number; + + /** + * The number of files that have failed to process. + */ + failed: number; + + /** + * The number of files that are currently being processed. + */ + in_progress: number; + + /** + * The total number of files. + */ + total: number; + } + + /** + * The expiration policy for a vector store. + */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. 
+ */ + days: number; + } +} + +export interface VectorStoreDeleted { + id: string; + + deleted: boolean; + + object: 'vector_store.deleted'; +} + +export interface VectorStoreCreateParams { + /** + * The expiration policy for a vector store. + */ + expires_after?: VectorStoreCreateParams.ExpiresAfter; + + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + * the vector store should use. Useful for tools like `file_search` that can access + * files. + */ + file_ids?: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The name of the vector store. + */ + name?: string; +} + +export namespace VectorStoreCreateParams { + /** + * The expiration policy for a vector store. + */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } +} + +export interface VectorStoreUpdateParams { + /** + * The expiration policy for a vector store. + */ + expires_after?: VectorStoreUpdateParams.ExpiresAfter | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maxium of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The name of the vector store. + */ + name?: string | null; +} + +export namespace VectorStoreUpdateParams { + /** + * The expiration policy for a vector store. 
+ */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } +} + +export interface VectorStoreListParams extends CursorPageParams { + /** + * A cursor for use in pagination. `before` is an object ID that defines your place + * in the list. For instance, if you make a list request and receive 100 objects, + * ending with obj_foo, your subsequent call can include before=obj_foo in order to + * fetch the previous page of the list. + */ + before?: string; + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: 'asc' | 'desc'; +} + +export namespace VectorStores { + export import VectorStore = VectorStoresAPI.VectorStore; + export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; + export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; + export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams; + export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams; + export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams; + export import Files = FilesAPI.Files; + export import VectorStoreFile = FilesAPI.VectorStoreFile; + export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted; + export import VectorStoreFilesPage = FilesAPI.VectorStoreFilesPage; + export import FileCreateParams = FilesAPI.FileCreateParams; + export import FileListParams = FilesAPI.FileListParams; + export import FileBatches = FileBatchesAPI.FileBatches; + export import VectorStoreFileBatch = FileBatchesAPI.VectorStoreFileBatch; + export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams; + export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams; 
+} diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 10b3d38d2..2469cce07 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -300,7 +300,7 @@ export interface JobCreateParams { /** * The ID of an uploaded file that contains training data. * - * See [upload file](https://platform.openai.com/docs/api-reference/files/upload) + * See [upload file](https://platform.openai.com/docs/api-reference/files/create) * for how to upload a file. * * Your dataset must be formatted as a JSONL file. Additionally, you must upload diff --git a/tests/api-resources/beta/assistants/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts similarity index 93% rename from tests/api-resources/beta/assistants/assistants.test.ts rename to tests/api-resources/beta/assistants.test.ts index 62282148d..56ce8446a 100644 --- a/tests/api-resources/beta/assistants/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -24,11 +24,20 @@ describe('resource assistants', () => { const response = await openai.beta.assistants.create({ model: 'gpt-4-turbo', description: 'string', - file_ids: ['string', 'string', 'string'], instructions: 'string', metadata: {}, name: 'string', + response_format: 'none', + temperature: 1, + tool_resources: { + code_interpreter: { file_ids: ['string', 'string', 'string'] }, + file_search: { + vector_store_ids: ['string'], + vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + }, + }, tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + top_p: 1, }); }); diff --git a/tests/api-resources/beta/threads/messages/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts similarity index 93% rename from tests/api-resources/beta/threads/messages/messages.test.ts rename to tests/api-resources/beta/threads/messages.test.ts index 7f62944e0..a0a025869 100644 --- 
a/tests/api-resources/beta/threads/messages/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -24,7 +24,11 @@ describe('resource messages', () => { const response = await openai.beta.threads.messages.create('string', { content: 'x', role: 'user', - file_ids: ['string'], + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], metadata: {}, }); }); diff --git a/tests/api-resources/beta/threads/messages/files.test.ts b/tests/api-resources/beta/threads/messages/files.test.ts deleted file mode 100644 index 58c8813fe..000000000 --- a/tests/api-resources/beta/threads/messages/files.test.ts +++ /dev/null @@ -1,65 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import OpenAI from 'openai'; -import { Response } from 'node-fetch'; - -const openai = new OpenAI({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', -}); - -describe('resource files', () => { - test('retrieve', async () => { - const responsePromise = openai.beta.threads.messages.files.retrieve( - 'thread_abc123', - 'msg_abc123', - 'file-abc123', - ); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('retrieve: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - openai.beta.threads.messages.files.retrieve('thread_abc123', 'msg_abc123', 'file-abc123', { - path: '/_stainless_unknown_path', - }), - ).rejects.toThrow(OpenAI.NotFoundError); - }); - - test('list', async () => { - const responsePromise = openai.beta.threads.messages.files.list('string', 'string'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('list: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - openai.beta.threads.messages.files.list('string', 'string', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(OpenAI.NotFoundError); - }); - - test('list: request options and params are passed correctly', async () => { - // ensure the request options are being 
passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - openai.beta.threads.messages.files.list( - 'string', - 'string', - { after: 'string', before: 'string', limit: 0, order: 'asc' }, - { path: '/_stainless_unknown_path' }, - ), - ).rejects.toThrow(OpenAI.NotFoundError); - }); -}); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 2489d56e2..4a3743ca0 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -25,9 +25,36 @@ describe('resource runs', () => { assistant_id: 'string', additional_instructions: 'string', additional_messages: [ - { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, - { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, - { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, ], instructions: 'string', max_completion_tokens: 256, @@ -39,6 +66,7 @@ describe('resource runs', () => { temperature: 1, tool_choice: 'none', tools: [{ type: 'code_interpreter' }, { type: 
'code_interpreter' }, { type: 'code_interpreter' }], + top_p: 1, truncation_strategy: { type: 'auto', last_messages: 1 }, }); }); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 028a150f4..0a5f70af4 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -33,11 +33,45 @@ describe('resource threads', () => { openai.beta.threads.create( { messages: [ - { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, - { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, - { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, ], metadata: {}, + tool_resources: { + code_interpreter: { file_ids: ['string', 'string', 'string'] }, + file_search: { + vector_store_ids: ['string'], + vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + }, + }, }, { path: '/_stainless_unknown_path' }, ), @@ -115,14 +149,53 @@ describe('resource threads', () => { temperature: 1, thread: { messages: [ - { role: 'user', content: 'x', file_ids: ['string'], 
metadata: {} }, - { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, - { role: 'user', content: 'x', file_ids: ['string'], metadata: {} }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, + { + role: 'user', + content: 'x', + attachments: [ + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + ], + metadata: {}, + }, ], + tool_resources: { + code_interpreter: { file_ids: ['string', 'string', 'string'] }, + file_search: { + vector_store_ids: ['string'], + vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + }, + }, metadata: {}, }, tool_choice: 'none', + tool_resources: { + code_interpreter: { file_ids: ['string', 'string', 'string'] }, + file_search: { vector_store_ids: ['string'] }, + }, tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + top_p: 1, truncation_strategy: { type: 'auto', last_messages: 1 }, }); }); diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/beta/vector-stores/file-batches.test.ts new file mode 100644 index 000000000..782b33a0c --- /dev/null +++ b/tests/api-resources/beta/vector-stores/file-batches.test.ts @@ -0,0 +1,98 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const openai = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource fileBatches', () => { + test('create: only required params', async () => { + const responsePromise = openai.beta.vectorStores.fileBatches.create('vs_abc123', { + file_ids: ['string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await openai.beta.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] }); + }); + + test('retrieve', async () => { + const responsePromise = openai.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('cancel', async () => { + const responsePromise = openai.beta.vectorStores.fileBatches.cancel('string', 'string'); + 
const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.beta.vectorStores.fileBatches.cancel('string', 'string', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('listFiles', async () => { + const responsePromise = openai.beta.vectorStores.fileBatches.listFiles('string', 'string'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('listFiles: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.beta.vectorStores.fileBatches.listFiles('string', 'string', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('listFiles: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.beta.vectorStores.fileBatches.listFiles( + 'string', + 'string', + { after: 'string', before: 'string', filter: 'in_progress', limit: 0, 
order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/assistants/files.test.ts b/tests/api-resources/beta/vector-stores/files.test.ts similarity index 77% rename from tests/api-resources/beta/assistants/files.test.ts rename to tests/api-resources/beta/vector-stores/files.test.ts index e285b4664..03340753c 100644 --- a/tests/api-resources/beta/assistants/files.test.ts +++ b/tests/api-resources/beta/vector-stores/files.test.ts @@ -10,7 +10,7 @@ const openai = new OpenAI({ describe('resource files', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.assistants.files.create('file-abc123', { file_id: 'string' }); + const responsePromise = openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'string' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,11 +21,11 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.assistants.files.create('file-abc123', { file_id: 'string' }); + const response = await openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'string' }); }); test('retrieve', async () => { - const responsePromise = openai.beta.assistants.files.retrieve('string', 'string'); + const responsePromise = openai.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -38,12 +38,14 @@ describe('resource files', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - 
openai.beta.assistants.files.retrieve('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { + path: '/_stainless_unknown_path', + }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list', async () => { - const responsePromise = openai.beta.assistants.files.list('string'); + const responsePromise = openai.beta.vectorStores.files.list('string'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -56,23 +58,23 @@ describe('resource files', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.assistants.files.list('string', { path: '/_stainless_unknown_path' }), + openai.beta.vectorStores.files.list('string', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.assistants.files.list( + openai.beta.vectorStores.files.list( 'string', - { after: 'string', before: 'string', limit: 0, order: 'asc' }, + { after: 'string', before: 'string', filter: 'in_progress', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = openai.beta.assistants.files.del('string', 'string'); + const responsePromise = openai.beta.vectorStores.files.del('string', 'string'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -85,7 +87,7 @@ describe('resource files', () => { test('del: request options 
instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.assistants.files.del('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.vectorStores.files.del('string', 'string', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/beta/vector-stores/vector-stores.test.ts new file mode 100644 index 000000000..445fa9ebf --- /dev/null +++ b/tests/api-resources/beta/vector-stores/vector-stores.test.ts @@ -0,0 +1,97 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const openai = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource vectorStores', () => { + test('create', async () => { + const responsePromise = openai.beta.vectorStores.create({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve', async () => { + const responsePromise = openai.beta.vectorStores.retrieve('string'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + 
test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.beta.vectorStores.retrieve('string', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('update', async () => { + const responsePromise = openai.beta.vectorStores.update('string', {}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list', async () => { + const responsePromise = openai.beta.vectorStores.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(openai.beta.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.beta.vectorStores.list( + { after: 'string', before: 'string', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + 
).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = openai.beta.vectorStores.del('string'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.beta.vectorStores.del('string', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); From c2c998d3227469f9aa10d0427211d666fd2b2274 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:35:09 -0400 Subject: [PATCH 069/533] release: 4.37.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c1ce2c41b..ad4acf8c5 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.36.0" + ".": "4.37.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ddd03a8b..4dd6cfb63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.37.0 (2024-04-17) + +Full Changelog: [v4.36.0...v4.37.0](https://github.com/openai/openai-node/compare/v4.36.0...v4.37.0) + +### Features + +* **api:** add vector stores ([#776](https://github.com/openai/openai-node/issues/776)) ([8bb929b](https://github.com/openai/openai-node/commit/8bb929b2ee91c1bec0a00347bf4f7628652d1be3)) + ## 
4.36.0 (2024-04-16) Full Changelog: [v4.35.0...v4.36.0](https://github.com/openai/openai-node/compare/v4.35.0...v4.36.0) diff --git a/README.md b/README.md index b75320e78..99cc1cc75 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.36.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.37.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index 6389062ec..fe9744b93 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.36.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.37.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index e848ce857..19ad959ee 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.36.0", + "version": "4.37.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 460925cae..33019f3de 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.36.0'; // x-release-please-version +export const VERSION = '4.37.0'; // x-release-please-version From 78f5c3568d95d8e854c04049dc7d5643aa49e93f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 17 Apr 2024 15:19:44 -0400 Subject: [PATCH 070/533] chore(api): docs and response_format response property (#778) --- src/resources/beta/assistants.ts | 38 ++++++++++- src/resources/beta/threads/runs/runs.ts | 84 +++++++++++++++++++------ src/resources/beta/threads/threads.ts | 46 +++++++++++--- 3 files changed, 140 insertions(+), 28 deletions(-) diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 
c0827848e..a24cee045 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -142,6 +142,31 @@ export interface Assistant { */ tools: Array; + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * message the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to + * produce JSON yourself via a system or user message. Without this, the model may + * generate an unending stream of whitespace until the generation reaches the token + * limit, resulting in a long-running and seemingly "stuck" request. Also note that + * the message content may be partially cut off if `finish_reason="length"`, which + * indicates the generation exceeded `max_tokens` or the conversation exceeded the + * max context length. + */ + response_format?: ThreadsAPI.AssistantResponseFormatOption | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. + */ + temperature?: number | null; + /** * A set of resources that are used by the assistant's tools. The resources are * specific to the type of tool. For example, the `code_interpreter` tool requires @@ -149,6 +174,15 @@ export interface Assistant { * IDs. */ tool_resources?: Assistant.ToolResources | null; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. 
+ */ + top_p?: number | null; } export namespace Assistant { @@ -1012,7 +1046,7 @@ export interface AssistantCreateParams { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1158,7 +1192,7 @@ export interface AssistantUpdateParams { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 9e42f8a20..48cfac546 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -409,7 +409,7 @@ export interface Run { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -446,7 +446,7 @@ export interface Run { * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. 
- * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -459,6 +459,10 @@ export interface Run { */ tools: Array; + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ truncation_strategy: Run.TruncationStrategy | null; /** @@ -534,6 +538,10 @@ export namespace Run { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to @@ -620,7 +628,7 @@ export interface RunCreateParamsBase { * The maximum number of completion tokens that may be used over the course of the * run. The run will make a best effort to use only the number of completion tokens * specified, across multiple turns of the run. If the run exceeds the number of - * completion tokens specified, the run will end with status `complete`. See + * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_completion_tokens?: number | null; @@ -629,7 +637,7 @@ export interface RunCreateParamsBase { * The maximum number of prompt tokens that may be used over the course of the run. * The run will make a best effort to use only the number of prompt tokens * specified, across multiple turns of the run. If the run exceeds the number of - * prompt tokens specified, the run will end with status `complete`. See + * prompt tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_prompt_tokens?: number | null; @@ -673,7 +681,7 @@ export interface RunCreateParamsBase { /** * Specifies the format that the model must output. 
Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -706,7 +714,7 @@ export interface RunCreateParamsBase { * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -722,9 +730,15 @@ export interface RunCreateParamsBase { * An alternative to sampling with temperature, called nucleus sampling, where the * model considers the results of the tokens with top_p probability mass. So 0.1 * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ top_p?: number | null; + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ truncation_strategy?: RunCreateParams.TruncationStrategy | null; } @@ -770,6 +784,10 @@ export namespace RunCreateParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to @@ -865,7 +883,7 @@ export interface RunCreateAndPollParams { * The maximum number of completion tokens that may be used over the course of the * run. 
The run will make a best effort to use only the number of completion tokens * specified, across multiple turns of the run. If the run exceeds the number of - * completion tokens specified, the run will end with status `complete`. See + * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_completion_tokens?: number | null; @@ -874,7 +892,7 @@ export interface RunCreateAndPollParams { * The maximum number of prompt tokens that may be used over the course of the run. * The run will make a best effort to use only the number of prompt tokens * specified, across multiple turns of the run. If the run exceeds the number of - * prompt tokens specified, the run will end with status `complete`. See + * prompt tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_prompt_tokens?: number | null; @@ -918,7 +936,7 @@ export interface RunCreateAndPollParams { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -944,7 +962,7 @@ export interface RunCreateAndPollParams { * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. 
*/ @@ -960,9 +978,15 @@ export interface RunCreateAndPollParams { * An alternative to sampling with temperature, called nucleus sampling, where the * model considers the results of the tokens with top_p probability mass. So 0.1 * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ top_p?: number | null; + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ truncation_strategy?: RunCreateAndPollParams.TruncationStrategy | null; } @@ -1008,6 +1032,10 @@ export namespace RunCreateAndPollParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to @@ -1056,7 +1084,7 @@ export interface RunCreateAndStreamParams { * The maximum number of completion tokens that may be used over the course of the * run. The run will make a best effort to use only the number of completion tokens * specified, across multiple turns of the run. If the run exceeds the number of - * completion tokens specified, the run will end with status `complete`. See + * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_completion_tokens?: number | null; @@ -1065,7 +1093,7 @@ export interface RunCreateAndStreamParams { * The maximum number of prompt tokens that may be used over the course of the run. * The run will make a best effort to use only the number of prompt tokens * specified, across multiple turns of the run. If the run exceeds the number of - * prompt tokens specified, the run will end with status `complete`. See + * prompt tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. 
*/ max_prompt_tokens?: number | null; @@ -1109,7 +1137,7 @@ export interface RunCreateAndStreamParams { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1135,7 +1163,7 @@ export interface RunCreateAndStreamParams { * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -1151,9 +1179,15 @@ export interface RunCreateAndStreamParams { * An alternative to sampling with temperature, called nucleus sampling, where the * model considers the results of the tokens with top_p probability mass. So 0.1 * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ top_p?: number | null; + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ truncation_strategy?: RunCreateAndStreamParams.TruncationStrategy | null; } @@ -1199,6 +1233,10 @@ export namespace RunCreateAndStreamParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. 
If set to @@ -1247,7 +1285,7 @@ export interface RunStreamParams { * The maximum number of completion tokens that may be used over the course of the * run. The run will make a best effort to use only the number of completion tokens * specified, across multiple turns of the run. If the run exceeds the number of - * completion tokens specified, the run will end with status `complete`. See + * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_completion_tokens?: number | null; @@ -1256,7 +1294,7 @@ export interface RunStreamParams { * The maximum number of prompt tokens that may be used over the course of the run. * The run will make a best effort to use only the number of prompt tokens * specified, across multiple turns of the run. If the run exceeds the number of - * prompt tokens specified, the run will end with status `complete`. See + * prompt tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ max_prompt_tokens?: number | null; @@ -1300,7 +1338,7 @@ export interface RunStreamParams { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1326,7 +1364,7 @@ export interface RunStreamParams { * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. 
- * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -1342,9 +1380,15 @@ export interface RunStreamParams { * An alternative to sampling with temperature, called nucleus sampling, where the * model considers the results of the tokens with top_p probability mass. So 0.1 * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ top_p?: number | null; + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ truncation_strategy?: RunStreamParams.TruncationStrategy | null; } @@ -1390,6 +1434,10 @@ export namespace RunStreamParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index f3590ed80..6f1e761de 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -131,7 +131,7 @@ export interface AssistantResponseFormat { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -170,7 +170,7 @@ export interface AssistantToolChoiceFunction { * Controls which (if any) tool is called by the model. 
`none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -511,7 +511,7 @@ export interface ThreadCreateAndRunParamsBase { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -549,7 +549,7 @@ export interface ThreadCreateAndRunParamsBase { * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -575,9 +575,15 @@ export interface ThreadCreateAndRunParamsBase { * An alternative to sampling with temperature, called nucleus sampling, where the * model considers the results of the tokens with top_p probability mass. So 0.1 * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ top_p?: number | null; + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. 
+ */ truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null; } @@ -745,6 +751,10 @@ export namespace ThreadCreateAndRunParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to @@ -854,7 +864,7 @@ export interface ThreadCreateAndRunPollParams { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -885,7 +895,7 @@ export interface ThreadCreateAndRunPollParams { * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -911,9 +921,15 @@ export interface ThreadCreateAndRunPollParams { * An alternative to sampling with temperature, called nucleus sampling, where the * model considers the results of the tokens with top_p probability mass. So 0.1 * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ top_p?: number | null; + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. 
+ */ truncation_strategy?: ThreadCreateAndRunPollParams.TruncationStrategy | null; } @@ -1081,6 +1097,10 @@ export namespace ThreadCreateAndRunPollParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to @@ -1169,7 +1189,7 @@ export interface ThreadCreateAndRunStreamParams { /** * Specifies the format that the model must output. Compatible with * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1200,7 +1220,7 @@ export interface ThreadCreateAndRunStreamParams { * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or + * Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -1226,9 +1246,15 @@ export interface ThreadCreateAndRunStreamParams { * An alternative to sampling with temperature, called nucleus sampling, where the * model considers the results of the tokens with top_p probability mass. So 0.1 * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ top_p?: number | null; + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. 
+ */ truncation_strategy?: ThreadCreateAndRunStreamParams.TruncationStrategy | null; } @@ -1396,6 +1422,10 @@ export namespace ThreadCreateAndRunStreamParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ export interface TruncationStrategy { /** * The truncation strategy to use for the thread. The default is `auto`. If set to From f3a5360f9ca94c9d04a9141444a617b6cc04df86 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 17 Apr 2024 15:20:05 -0400 Subject: [PATCH 071/533] release: 4.37.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ad4acf8c5..b53631554 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.37.0" + ".": "4.37.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dd6cfb63..7483847df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.37.1 (2024-04-17) + +Full Changelog: [v4.37.0...v4.37.1](https://github.com/openai/openai-node/compare/v4.37.0...v4.37.1) + +### Chores + +* **api:** docs and response_format response property ([#778](https://github.com/openai/openai-node/issues/778)) ([78f5c35](https://github.com/openai/openai-node/commit/78f5c3568d95d8e854c04049dc7d5643aa49e93f)) + ## 4.37.0 (2024-04-17) Full Changelog: [v4.36.0...v4.37.0](https://github.com/openai/openai-node/compare/v4.36.0...v4.37.0) diff --git a/README.md b/README.md index 99cc1cc75..8c3ee4e08 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.37.0/mod.ts'; +import OpenAI from 
'https://deno.land/x/openai@v4.37.1/mod.ts'; ``` diff --git a/build-deno b/build-deno index fe9744b93..64209f670 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.37.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.37.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 19ad959ee..1adeb496e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.37.0", + "version": "4.37.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 33019f3de..5f2e363d7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.37.0'; // x-release-please-version +export const VERSION = '4.37.1'; // x-release-please-version From 442138d8cfb484f203350142f2cdb7e05bcfd30d Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 18 Apr 2024 10:48:38 -0400 Subject: [PATCH 072/533] feat(api): batch list endpoint (#781) --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 2 ++ src/resources/batches.ts | 23 +++++++++++++++++++++++ src/resources/index.ts | 10 +++++++++- tests/api-resources/batches.test.ts | 25 +++++++++++++++++++++++++ 6 files changed, 61 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2814bb777..c9a9bfa4a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1 +1 @@ -configured_endpoints: 62 +configured_endpoints: 63 diff --git a/api.md b/api.md index 8161fb2c7..e7a8d3a31 100644 --- a/api.md +++ b/api.md @@ -378,4 +378,5 @@ Methods: - client.batches.create({ ...params }) -> Batch - client.batches.retrieve(batchId) -> Batch +- client.batches.list({ ...params }) -> BatchesPage - 
client.batches.cancel(batchId) -> Batch diff --git a/src/index.ts b/src/index.ts index 7a776b2c1..8beec1e67 100644 --- a/src/index.ts +++ b/src/index.ts @@ -302,7 +302,9 @@ export namespace OpenAI { export import Batch = API.Batch; export import BatchError = API.BatchError; export import BatchRequestCounts = API.BatchRequestCounts; + export import BatchesPage = API.BatchesPage; export import BatchCreateParams = API.BatchCreateParams; + export import BatchListParams = API.BatchListParams; export import ErrorObject = API.ErrorObject; export import FunctionDefinition = API.FunctionDefinition; diff --git a/src/resources/batches.ts b/src/resources/batches.ts index 75b491a16..d0bb891e3 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -2,7 +2,9 @@ import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; +import { isRequestOptions } from 'openai/core'; import * as BatchesAPI from 'openai/resources/batches'; +import { CursorPage, type CursorPageParams } from 'openai/pagination'; export class Batches extends APIResource { /** @@ -19,6 +21,21 @@ export class Batches extends APIResource { return this._client.get(`/batches/${batchId}`, options); } + /** + * List your organization's batches. + */ + list(query?: BatchListParams, options?: Core.RequestOptions): Core.PagePromise; + list(options?: Core.RequestOptions): Core.PagePromise; + list( + query: BatchListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.getAPIList('/batches', BatchesPage, { query, ...options }); + } + /** * Cancels an in-progress batch. 
*/ @@ -27,6 +44,8 @@ export class Batches extends APIResource { } } +export class BatchesPage extends CursorPage {} + export interface Batch { id: string; @@ -217,9 +236,13 @@ export interface BatchCreateParams { metadata?: Record | null; } +export interface BatchListParams extends CursorPageParams {} + export namespace Batches { export import Batch = BatchesAPI.Batch; export import BatchError = BatchesAPI.BatchError; export import BatchRequestCounts = BatchesAPI.BatchRequestCounts; + export import BatchesPage = BatchesAPI.BatchesPage; export import BatchCreateParams = BatchesAPI.BatchCreateParams; + export import BatchListParams = BatchesAPI.BatchListParams; } diff --git a/src/resources/index.ts b/src/resources/index.ts index 282e57ea1..6f8e8564c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -3,7 +3,15 @@ export * from './chat/index'; export * from './shared'; export { Audio } from './audio/audio'; -export { Batch, BatchError, BatchRequestCounts, BatchCreateParams, Batches } from './batches'; +export { + Batch, + BatchError, + BatchRequestCounts, + BatchCreateParams, + BatchListParams, + BatchesPage, + Batches, +} from './batches'; export { Beta } from './beta/beta'; export { Completion, diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts index e4a9015d1..2cd845de6 100644 --- a/tests/api-resources/batches.test.ts +++ b/tests/api-resources/batches.test.ts @@ -51,6 +51,31 @@ describe('resource batches', () => { ); }); + test('list', async () => { + const responsePromise = openai.batches.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are 
passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(openai.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.batches.list({ after: 'string', limit: 0 }, { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + test('cancel', async () => { const responsePromise = openai.batches.cancel('string'); const rawResponse = await responsePromise.asResponse(); From a28e0ba60b59f7a85a03f48a0690477a27b7ca4f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 18 Apr 2024 10:49:00 -0400 Subject: [PATCH 073/533] release: 4.38.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b53631554..b5441c013 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.37.1" + ".": "4.38.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7483847df..069aa171f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.38.0 (2024-04-18) + +Full Changelog: [v4.37.1...v4.38.0](https://github.com/openai/openai-node/compare/v4.37.1...v4.38.0) + +### Features + +* **api:** batch list endpoint ([#781](https://github.com/openai/openai-node/issues/781)) ([d226759](https://github.com/openai/openai-node/commit/d226759164fbed33198d8bdc315c98e1052dade8)) + ## 4.37.1 (2024-04-17) Full Changelog: 
[v4.37.0...v4.37.1](https://github.com/openai/openai-node/compare/v4.37.0...v4.37.1) diff --git a/README.md b/README.md index 8c3ee4e08..dade8aa9c 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.37.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.38.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index 64209f670..8706c6633 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.37.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.38.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 1adeb496e..b331e024f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.37.1", + "version": "4.38.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 5f2e363d7..e36007fb9 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.37.1'; // x-release-please-version +export const VERSION = '4.38.0'; // x-release-please-version From 40c7e23900db7a8af43a23a9eb24b363a076a044 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:32:30 -0400 Subject: [PATCH 074/533] fix(api): correct types for attachments (#783) --- src/resources/beta/threads/messages.ts | 8 ++--- src/resources/beta/threads/runs/runs.ts | 16 ++++----- src/resources/beta/threads/threads.ts | 16 ++++----- .../beta/threads/messages.test.ts | 6 ++-- .../beta/threads/runs/runs.test.ts | 18 +++++----- .../beta/threads/threads.test.ts | 36 +++++++++---------- 6 files changed, 50 insertions(+), 50 deletions(-) diff --git 
a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index f17b8508d..68fee1a94 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -369,12 +369,12 @@ export interface Message { export namespace Message { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. */ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } /** @@ -523,12 +523,12 @@ export interface MessageCreateParams { export namespace MessageCreateParams { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. */ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 48cfac546..d48619fba 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -775,12 +775,12 @@ export namespace RunCreateParams { export namespace AdditionalMessage { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. */ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } @@ -1023,12 +1023,12 @@ export namespace RunCreateAndPollParams { export namespace AdditionalMessage { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. */ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } @@ -1224,12 +1224,12 @@ export namespace RunCreateAndStreamParams { export namespace AdditionalMessage { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. 
*/ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } @@ -1425,12 +1425,12 @@ export namespace RunStreamParams { export namespace AdditionalMessage { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. */ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 6f1e761de..5f325d33a 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -313,12 +313,12 @@ export namespace ThreadCreateParams { export namespace Message { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. */ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } @@ -648,12 +648,12 @@ export namespace ThreadCreateAndRunParams { export namespace Message { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. */ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } @@ -994,12 +994,12 @@ export namespace ThreadCreateAndRunPollParams { export namespace Message { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. */ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } @@ -1319,12 +1319,12 @@ export namespace ThreadCreateAndRunStreamParams { export namespace Message { export interface Attachment { - add_to?: Array<'file_search' | 'code_interpreter'>; - /** * The ID of the file to attach to the message. 
*/ file_id?: string; + + tools?: Array<'file_search' | 'code_interpreter'>; } } diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index a0a025869..61ccebe9f 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -25,9 +25,9 @@ describe('resource messages', () => { content: 'x', role: 'user', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 4a3743ca0..ea9c0761e 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -29,9 +29,9 @@ describe('resource runs', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, @@ -39,9 +39,9 @@ describe('resource runs', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 
'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, @@ -49,9 +49,9 @@ describe('resource runs', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 0a5f70af4..6a697865b 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -37,9 +37,9 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, @@ -47,9 +47,9 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 
'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, @@ -57,9 +57,9 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, @@ -153,9 +153,9 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, @@ -163,9 +163,9 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, @@ -173,9 +173,9 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, - { file_id: 'string', add_to: ['file_search', 'code_interpreter'] }, + { 
file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, ], metadata: {}, }, From 73250098e554f130ba38b36f673b257808ee2f37 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:32:50 -0400 Subject: [PATCH 075/533] release: 4.38.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b5441c013..27353849a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.38.0" + ".": "4.38.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 069aa171f..9e0f99a06 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.38.1 (2024-04-18) + +Full Changelog: [v4.38.0...v4.38.1](https://github.com/openai/openai-node/compare/v4.38.0...v4.38.1) + +### Bug Fixes + +* **api:** correct types for attachments ([#783](https://github.com/openai/openai-node/issues/783)) ([6893631](https://github.com/openai/openai-node/commit/6893631334f75e232ba130f5dd67f1230b1e5fa0)) + ## 4.38.0 (2024-04-18) Full Changelog: [v4.37.1...v4.38.0](https://github.com/openai/openai-node/compare/v4.37.1...v4.38.0) diff --git a/README.md b/README.md index dade8aa9c..cbc719a7c 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.38.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.38.1/mod.ts'; ``` diff --git a/build-deno b/build-deno index 8706c6633..0c7e2f7c1 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: 
\`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.38.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.38.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index b331e024f..a1ef89c27 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.38.0", + "version": "4.38.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e36007fb9..dac71224d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.38.0'; // x-release-please-version +export const VERSION = '4.38.1'; // x-release-please-version From 35fe06e8811d2692f2ae4adc23eb2ad77934592d Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 19 Apr 2024 13:42:26 -0400 Subject: [PATCH 076/533] fix(api): correct types for message attachment tools (#787) --- src/resources/beta/threads/messages.ts | 11 +- src/resources/beta/threads/runs/runs.ts | 20 ++- src/resources/beta/threads/threads.ts | 20 ++- .../beta/threads/messages.test.ts | 15 +- .../beta/threads/runs/runs.test.ts | 81 ++++++++- .../beta/threads/threads.test.ts | 162 ++++++++++++++++-- 6 files changed, 269 insertions(+), 40 deletions(-) diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 68fee1a94..559395ca5 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -4,6 +4,7 @@ import * as Core from 'openai/core'; import { APIResource } from 'openai/resource'; import { isRequestOptions } from 'openai/core'; import * as MessagesAPI from 'openai/resources/beta/threads/messages'; +import * as AssistantsAPI from 'openai/resources/beta/assistants'; import { CursorPage, type CursorPageParams } from 'openai/pagination'; export class Messages extends APIResource { @@ 
-374,7 +375,10 @@ export namespace Message { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } /** @@ -528,7 +532,10 @@ export namespace MessageCreateParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } } diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index d48619fba..a15565450 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -780,7 +780,10 @@ export namespace RunCreateParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } } @@ -1028,7 +1031,10 @@ export namespace RunCreateAndPollParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } } @@ -1229,7 +1235,10 @@ export namespace RunCreateAndStreamParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } } @@ -1430,7 +1439,10 @@ export namespace RunStreamParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 5f325d33a..81ba31dba 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -318,7 +318,10 @@ export namespace ThreadCreateParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. 
+ */ + tools?: Array; } } @@ -653,7 +656,10 @@ export namespace ThreadCreateAndRunParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } } @@ -999,7 +1005,10 @@ export namespace ThreadCreateAndRunPollParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } } @@ -1324,7 +1333,10 @@ export namespace ThreadCreateAndRunStreamParams { */ file_id?: string; - tools?: Array<'file_search' | 'code_interpreter'>; + /** + * The tools to add this file to. + */ + tools?: Array; } } diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index 61ccebe9f..eb1e78133 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -25,9 +25,18 @@ describe('resource messages', () => { content: 'x', role: 'user', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + }, + { + file_id: 'string', + tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + }, + { + file_id: 'string', + tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + }, ], metadata: {}, }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index ea9c0761e..85d97c34c 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -29,9 +29,30 @@ describe('resource runs', () => { role: 'user', content: 'x', attachments: [ 
- { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, @@ -39,9 +60,30 @@ describe('resource runs', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, @@ -49,9 +91,30 @@ describe('resource runs', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + 
file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 6a697865b..f2521cd5b 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -37,9 +37,30 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, @@ -47,9 +68,30 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, @@ -57,9 +99,30 @@ describe('resource threads', () => { role: 'user', content: 
'x', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, @@ -153,9 +216,30 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, @@ -163,9 +247,30 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 
'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, @@ -173,9 +278,30 @@ describe('resource threads', () => { role: 'user', content: 'x', attachments: [ - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, - { file_id: 'string', tools: ['file_search', 'code_interpreter'] }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, + { + file_id: 'string', + tools: [ + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + { type: 'code_interpreter' }, + ], + }, ], metadata: {}, }, From 4c041e03013dbd7de5bfeb02db42c5e657217167 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 19 Apr 2024 13:42:48 -0400 Subject: [PATCH 077/533] release: 4.38.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 27353849a..898054ccf 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.38.1" + ".": "4.38.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e0f99a06..e43fa8c67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.38.2 (2024-04-19) + +Full Changelog: [v4.38.1...v4.38.2](https://github.com/openai/openai-node/compare/v4.38.1...v4.38.2) + +### Bug Fixes + +* **api:** correct types for message attachment tools ([#787](https://github.com/openai/openai-node/issues/787)) 
([8626884](https://github.com/openai/openai-node/commit/8626884abd2494aa081db9e50a2f268b6cebc5df)) + ## 4.38.1 (2024-04-18) Full Changelog: [v4.38.0...v4.38.1](https://github.com/openai/openai-node/compare/v4.38.0...v4.38.1) diff --git a/README.md b/README.md index cbc719a7c..2637372d7 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.38.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.38.2/mod.ts'; ``` diff --git a/build-deno b/build-deno index 0c7e2f7c1..8dfcec58d 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.38.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.38.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index a1ef89c27..74ced775c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.38.1", + "version": "4.38.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index dac71224d..cd209780f 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.38.1'; // x-release-please-version +export const VERSION = '4.38.2'; // x-release-please-version From 8947f195b2dfab7ceebe1e0bb5c886e229cd541f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 22 Apr 2024 07:40:50 -0400 Subject: [PATCH 078/533] chore(internal): use @swc/jest for running tests (#793) --- jest.config.ts | 3 ++ package.json | 2 + src/index.ts | 7 ++-- yarn.lock | 102 +++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 110 insertions(+), 4 deletions(-) diff --git a/jest.config.ts b/jest.config.ts index 
f746f4bf9..445a87301 100644 --- a/jest.config.ts +++ b/jest.config.ts @@ -3,6 +3,9 @@ import type { JestConfigWithTsJest } from 'ts-jest'; const config: JestConfigWithTsJest = { preset: 'ts-jest/presets/default-esm', testEnvironment: 'node', + transform: { + '^.+\\.(t|j)sx?$': ['@swc/jest', { sourceMaps: 'inline' }], + }, moduleNameMapper: { '^openai$': '/src/index.ts', '^openai/_shims/auto/(.*)$': '/src/_shims/auto/$1-node', diff --git a/package.json b/package.json index 74ced775c..d43a76fe7 100644 --- a/package.json +++ b/package.json @@ -35,6 +35,8 @@ "web-streams-polyfill": "^3.2.1" }, "devDependencies": { + "@swc/core": "^1.3.102", + "@swc/jest": "^0.2.29", "@types/jest": "^29.4.0", "@typescript-eslint/eslint-plugin": "^6.7.0", "@typescript-eslint/parser": "^6.7.0", diff --git a/src/index.ts b/src/index.ts index 8beec1e67..1741a4816 100644 --- a/src/index.ts +++ b/src/index.ts @@ -194,6 +194,9 @@ export class OpenAI extends Core.APIClient { static InternalServerError = Errors.InternalServerError; static PermissionDeniedError = Errors.PermissionDeniedError; static UnprocessableEntityError = Errors.UnprocessableEntityError; + + static toFile = Uploads.toFile; + static fileFromPath = Uploads.fileFromPath; } export const { @@ -216,10 +219,6 @@ export import toFile = Uploads.toFile; export import fileFromPath = Uploads.fileFromPath; export namespace OpenAI { - // Helper functions - export import toFile = Uploads.toFile; - export import fileFromPath = Uploads.fileFromPath; - export import RequestOptions = Core.RequestOptions; export import Page = Pagination.Page; diff --git a/yarn.lock b/yarn.lock index 9cef21d9b..dda4d2e4a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -432,6 +432,13 @@ slash "^3.0.0" strip-ansi "^6.0.0" +"@jest/create-cache-key-function@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/create-cache-key-function/-/create-cache-key-function-29.7.0.tgz#793be38148fab78e65f40ae30c36785f4ad859f0" + integrity 
sha512-4QqS3LY5PBmTRHj9sAg1HLoPzqAI0uOX6wI/TRqHIcOxlFidy6YEmCQJk6FSZjNLGCeubDMfmkWL+qaLKhSGQA== + dependencies: + "@jest/types" "^29.6.3" + "@jest/environment@^29.7.0": version "29.7.0" resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.7.0.tgz#24d61f54ff1f786f3cd4073b4b94416383baf2a7" @@ -662,6 +669,96 @@ dependencies: "@sinonjs/commons" "^3.0.0" +"@swc/core-darwin-arm64@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-darwin-arm64/-/core-darwin-arm64-1.4.16.tgz#2cd45d709ce76d448d96bf8d0006849541436611" + integrity sha512-UOCcH1GvjRnnM/LWT6VCGpIk0OhHRq6v1U6QXuPt5wVsgXnXQwnf5k3sG5Cm56hQHDvhRPY6HCsHi/p0oek8oQ== + +"@swc/core-darwin-x64@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-darwin-x64/-/core-darwin-x64-1.4.16.tgz#a5bc7d8b1dd850adb0bb95c6b5c742b92201fd01" + integrity sha512-t3bgqFoYLWvyVtVL6KkFNCINEoOrIlyggT/kJRgi1y0aXSr0oVgcrQ4ezJpdeahZZ4N+Q6vT3ffM30yIunELNA== + +"@swc/core-linux-arm-gnueabihf@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.4.16.tgz#961744908ee5cbb79bc009dcf58cc8b831111f38" + integrity sha512-DvHuwvEF86YvSd0lwnzVcjOTZ0jcxewIbsN0vc/0fqm9qBdMMjr9ox6VCam1n3yYeRtj4VFgrjeNFksqbUejdQ== + +"@swc/core-linux-arm64-gnu@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.4.16.tgz#43713be3f26757d82d2745dc25f8b63400e0a3d0" + integrity sha512-9Uu5YlPbyCvbidjKtYEsPpyZlu16roOZ5c2tP1vHfnU9bgf5Tz5q5VovSduNxPHx+ed2iC1b1URODHvDzbbDuQ== + +"@swc/core-linux-arm64-musl@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.4.16.tgz#394a7d030f3a61902bd3947bb9d70d26d42f3c81" + integrity sha512-/YZq/qB1CHpeoL0eMzyqK5/tYZn/rzKoCYDviFU4uduSUIJsDJQuQA/skdqUzqbheOXKAd4mnJ1hT04RbJ8FPQ== + +"@swc/core-linux-x64-gnu@1.4.16": + 
version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.4.16.tgz#71eb108b784f9d551ee8a35ebcdaed972f567981" + integrity sha512-UUjaW5VTngZYDcA8yQlrFmqs1tLi1TxbKlnaJwoNhel9zRQ0yG1YEVGrzTvv4YApSuIiDK18t+Ip927bwucuVQ== + +"@swc/core-linux-x64-musl@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.4.16.tgz#10dbaedb4e3dfc7268e3a9a66ad3431471ef035b" + integrity sha512-aFhxPifevDTwEDKPi4eRYWzC0p/WYJeiFkkpNU5Uc7a7M5iMWPAbPFUbHesdlb9Jfqs5c07oyz86u+/HySBNPQ== + +"@swc/core-win32-arm64-msvc@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.4.16.tgz#80247adff6c245ff32b44d773c1a148858cd655f" + integrity sha512-bTD43MbhIHL2s5QgCwyleaGwl96Gk/scF2TaVKdUe4QlJCDV/YK9h5oIBAp63ckHtE8GHlH4c8dZNBiAXn4Org== + +"@swc/core-win32-ia32-msvc@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.4.16.tgz#e540afc3ccf3224267b4ddfb408f9d9737984686" + integrity sha512-/lmZeAN/qV5XbK2SEvi8e2RkIg8FQNYiSA8y2/Zb4gTUMKVO5JMLH0BSWMiIKMstKDPDSxMWgwJaQHF8UMyPmQ== + +"@swc/core-win32-x64-msvc@1.4.16": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.4.16.tgz#f880939fca32c181adfe7e3abd2b6b7857bd3489" + integrity sha512-BPAfFfODWXtUu6SwaTTftDHvcbDyWBSI/oanUeRbQR5vVWkXoQ3cxLTsDluc3H74IqXS5z1Uyoe0vNo2hB1opA== + +"@swc/core@^1.3.102": + version "1.4.16" + resolved "https://registry.yarnpkg.com/@swc/core/-/core-1.4.16.tgz#d175bae2acfecd53bcbd4293f1fba5ec316634a0" + integrity sha512-Xaf+UBvW6JNuV131uvSNyMXHn+bh6LyKN4tbv7tOUFQpXyz/t9YWRE04emtlUW9Y0qrm/GKFCbY8n3z6BpZbTA== + dependencies: + "@swc/counter" "^0.1.2" + "@swc/types" "^0.1.5" + optionalDependencies: + "@swc/core-darwin-arm64" "1.4.16" + "@swc/core-darwin-x64" "1.4.16" + 
"@swc/core-linux-arm-gnueabihf" "1.4.16" + "@swc/core-linux-arm64-gnu" "1.4.16" + "@swc/core-linux-arm64-musl" "1.4.16" + "@swc/core-linux-x64-gnu" "1.4.16" + "@swc/core-linux-x64-musl" "1.4.16" + "@swc/core-win32-arm64-msvc" "1.4.16" + "@swc/core-win32-ia32-msvc" "1.4.16" + "@swc/core-win32-x64-msvc" "1.4.16" + +"@swc/counter@^0.1.2", "@swc/counter@^0.1.3": + version "0.1.3" + resolved "https://registry.yarnpkg.com/@swc/counter/-/counter-0.1.3.tgz#cc7463bd02949611c6329596fccd2b0ec782b0e9" + integrity sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ== + +"@swc/jest@^0.2.29": + version "0.2.36" + resolved "https://registry.yarnpkg.com/@swc/jest/-/jest-0.2.36.tgz#2797450a30d28b471997a17e901ccad946fe693e" + integrity sha512-8X80dp81ugxs4a11z1ka43FPhP+/e+mJNXJSxiNYk8gIX/jPBtY4gQTrKu/KIoco8bzKuPI5lUxjfLiGsfvnlw== + dependencies: + "@jest/create-cache-key-function" "^29.7.0" + "@swc/counter" "^0.1.3" + jsonc-parser "^3.2.0" + +"@swc/types@^0.1.5": + version "0.1.6" + resolved "https://registry.yarnpkg.com/@swc/types/-/types-0.1.6.tgz#2f13f748995b247d146de2784d3eb7195410faba" + integrity sha512-/JLo/l2JsT/LRd80C3HfbmVpxOAJ11FO2RCEslFrgzLltoP9j8XIbsyDcfCt2WWyX+CM96rBoNM+IToAkFOugg== + dependencies: + "@swc/counter" "^0.1.3" + "@ts-morph/common@~0.20.0": version "0.20.0" resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.20.0.tgz#3f161996b085ba4519731e4d24c35f6cba5b80af" @@ -2445,6 +2542,11 @@ json5@^2.2.2, json5@^2.2.3: resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== +jsonc-parser@^3.2.0: + version "3.2.1" + resolved "https://registry.yarnpkg.com/jsonc-parser/-/jsonc-parser-3.2.1.tgz#031904571ccf929d7670ee8c547545081cb37f1a" + integrity 
sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA== + kleur@^3.0.3: version "3.0.3" resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" From 5fb93d5484b52f2b0717d1d579cf91bf88d93a68 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 22 Apr 2024 07:41:11 -0400 Subject: [PATCH 079/533] release: 4.38.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 898054ccf..01fad1d40 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.38.2" + ".": "4.38.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e43fa8c67..f4ad2ee3e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.38.3 (2024-04-22) + +Full Changelog: [v4.38.2...v4.38.3](https://github.com/openai/openai-node/compare/v4.38.2...v4.38.3) + +### Chores + +* **internal:** use @swc/jest for running tests ([#793](https://github.com/openai/openai-node/issues/793)) ([8947f19](https://github.com/openai/openai-node/commit/8947f195b2dfab7ceebe1e0bb5c886e229cd541f)) + ## 4.38.2 (2024-04-19) Full Changelog: [v4.38.1...v4.38.2](https://github.com/openai/openai-node/compare/v4.38.1...v4.38.2) diff --git a/README.md b/README.md index 2637372d7..1f763a096 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.38.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.38.3/mod.ts'; ``` diff --git a/build-deno b/build-deno index 8dfcec58d..0f27c0f35 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from 
https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.38.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.38.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index d43a76fe7..7ccc457c3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.38.2", + "version": "4.38.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index cd209780f..848b87c16 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.38.2'; // x-release-please-version +export const VERSION = '4.38.3'; // x-release-please-version From 7d2fd6c66adce9dfb1d95e9e967c866a50b8501c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:58:21 -0400 Subject: [PATCH 080/533] fix(docs): doc improvements (#796) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1f763a096..443960a7b 100644 --- a/README.md +++ b/README.md @@ -119,7 +119,7 @@ More information on the lifecycle of a Run can be found in the [Run Lifecycle Do ### Bulk Upload Helpers -When creating an interacting with vector stores, you can use the polling helpers to monitor the status of operations. +When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations. For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. 
```ts From 80d7697be17a0577b5bd5af695f4cf3b425e4109 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 24 Apr 2024 14:33:11 -0400 Subject: [PATCH 081/533] fix(api): change timestamps to unix integers (#798) --- src/resources/batches.ts | 23 ++++++++++--------- src/resources/beta/vector-stores/files.ts | 6 +++++ .../beta/vector-stores/vector-stores.ts | 10 ++++---- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/src/resources/batches.ts b/src/resources/batches.ts index d0bb891e3..fb0470dcd 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -57,7 +57,7 @@ export interface Batch { /** * The Unix timestamp (in seconds) for when the batch was created. */ - created_at: string; + created_at: number; /** * The OpenAI API endpoint used by the batch. @@ -90,17 +90,17 @@ export interface Batch { /** * The Unix timestamp (in seconds) for when the batch was cancelled. */ - cancelled_at?: string; + cancelled_at?: number; /** * The Unix timestamp (in seconds) for when the batch started cancelling. */ - cancelling_at?: string; + cancelling_at?: number; /** * The Unix timestamp (in seconds) for when the batch was completed. */ - completed_at?: string; + completed_at?: number; /** * The ID of the file containing the outputs of requests with errors. @@ -112,27 +112,27 @@ export interface Batch { /** * The Unix timestamp (in seconds) for when the batch expired. */ - expired_at?: string; + expired_at?: number; /** * The Unix timestamp (in seconds) for when the batch will expire. */ - expires_at?: string; + expires_at?: number; /** * The Unix timestamp (in seconds) for when the batch failed. */ - failed_at?: string; + failed_at?: number; /** * The Unix timestamp (in seconds) for when the batch started finalizing. */ - finalizing_at?: string; + finalizing_at?: number; /** * The Unix timestamp (in seconds) for when the batch started processing. 
*/ - in_progress_at?: string; + in_progress_at?: number; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -225,8 +225,9 @@ export interface BatchCreateParams { * See [upload file](https://platform.openai.com/docs/api-reference/files/create) * for how to upload a file. * - * Your input file must be formatted as a JSONL file, and must be uploaded with the - * purpose `batch`. + * Your input file must be formatted as a + * [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), + * and must be uploaded with the purpose `batch`. */ input_file_id: string; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index 40b97e9a9..a18211221 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -203,6 +203,12 @@ export interface VectorStoreFile { */ status: 'in_progress' | 'completed' | 'cancelled' | 'failed'; + /** + * The total vector store usage in bytes. Note that this may be different from the + * original file size. + */ + usage_bytes: number; + /** * The ID of the * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 892d06aa4..0409f3af7 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -93,11 +93,6 @@ export interface VectorStore { */ id: string; - /** - * The byte size of the vector store. - */ - bytes: number; - /** * The Unix timestamp (in seconds) for when the vector store was created. */ @@ -135,6 +130,11 @@ export interface VectorStore { */ status: 'expired' | 'in_progress' | 'completed'; + /** + * The total number of bytes used by the files in the vector store. + */ + usage_bytes: number; + /** * The expiration policy for a vector store. 
*/ From adf0524a01d53d3542bcc4700edc6206b717373b Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 24 Apr 2024 14:33:32 -0400 Subject: [PATCH 082/533] release: 4.38.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 01fad1d40..b299eb65b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.38.3" + ".": "4.38.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f4ad2ee3e..06d0e4b32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.38.4 (2024-04-24) + +Full Changelog: [v4.38.3...v4.38.4](https://github.com/openai/openai-node/compare/v4.38.3...v4.38.4) + +### Bug Fixes + +* **api:** change timestamps to unix integers ([#798](https://github.com/openai/openai-node/issues/798)) ([7271a6c](https://github.com/openai/openai-node/commit/7271a6cdc7d37151d2cae18fdd20b87d97624a84)) +* **docs:** doc improvements ([#796](https://github.com/openai/openai-node/issues/796)) ([49fcc86](https://github.com/openai/openai-node/commit/49fcc86b44958795a6f5e0901f369653dfbcc637)) + ## 4.38.3 (2024-04-22) Full Changelog: [v4.38.2...v4.38.3](https://github.com/openai/openai-node/compare/v4.38.2...v4.38.3) diff --git a/README.md b/README.md index 443960a7b..328744dc0 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.38.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.38.4/mod.ts'; ``` diff --git a/build-deno b/build-deno index 0f27c0f35..1e5c4096d 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI 
from "https://deno.land/x/openai@v4.38.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.38.4/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 7ccc457c3..7576402c3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.38.3", + "version": "4.38.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 848b87c16..6071af9d7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.38.3'; // x-release-please-version +export const VERSION = '4.38.4'; // x-release-please-version From 5ab7780ea8889818f403a9a89ab19585a7e8972e Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:41:16 -0400 Subject: [PATCH 083/533] chore(internal): use actions/checkout@v4 for codeflow (#799) --- .github/workflows/create-releases.yml | 2 +- .github/workflows/publish-deno.yml | 2 +- .github/workflows/publish-npm.yml | 2 +- .github/workflows/release-doctor.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index 7ecd6282a..d6d802e16 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -14,7 +14,7 @@ jobs: environment: publish steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: stainless-api/trigger-release-please@v1 id: release diff --git a/.github/workflows/publish-deno.yml b/.github/workflows/publish-deno.yml index 578b592b3..894c516a0 100644 --- a/.github/workflows/publish-deno.yml +++ b/.github/workflows/publish-deno.yml @@ -11,7 +11,7 @@ jobs: environment: publish steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Generate a token id: generate_token diff --git 
a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 2258ec560..5a3711b53 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -11,7 +11,7 @@ jobs: environment: publish steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Node uses: actions/setup-node@v3 diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index b640869d0..3bb1d714f 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -13,7 +13,7 @@ jobs: if: github.repository == 'openai/openai-node' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Check release environment run: | From 125a7a46a4eb5a1d828bf5e195b9806964befc3d Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:41:38 -0400 Subject: [PATCH 084/533] release: 4.38.5 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b299eb65b..f4b74e8b3 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.38.4" + ".": "4.38.5" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 06d0e4b32..8c69b6ecc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.38.5 (2024-04-24) + +Full Changelog: [v4.38.4...v4.38.5](https://github.com/openai/openai-node/compare/v4.38.4...v4.38.5) + +### Chores + +* **internal:** use actions/checkout@v4 for codeflow ([#799](https://github.com/openai/openai-node/issues/799)) 
([5ab7780](https://github.com/openai/openai-node/commit/5ab7780ea8889818f403a9a89ab19585a7e8972e)) + ## 4.38.4 (2024-04-24) Full Changelog: [v4.38.3...v4.38.4](https://github.com/openai/openai-node/compare/v4.38.3...v4.38.4) diff --git a/README.md b/README.md index 328744dc0..ec0d5a03c 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.38.4/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.38.5/mod.ts'; ``` diff --git a/build-deno b/build-deno index 1e5c4096d..ec2c3f8a5 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.38.4/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.38.5/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index 7576402c3..c67a2fb77 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.38.4", + "version": "4.38.5", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 6071af9d7..bc0a54ad1 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.38.4'; // x-release-please-version +export const VERSION = '4.38.5'; // x-release-please-version From 75afedcedbf28328f7beedf59ebf64d4f8306e39 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 26 Apr 2024 16:30:06 -0400 Subject: [PATCH 085/533] chore(internal): add scripts/test and scripts/mock (#801) --- jest.config.ts | 1 + package.json | 2 +- scripts/mock | 34 ++++++++++++++++++++++++++++++++++ scripts/test | 29 +++++++++++++++++++++++++++++ 4 files changed, 65 insertions(+), 1 deletion(-) create mode 100755 scripts/mock create 
mode 100755 scripts/test diff --git a/jest.config.ts b/jest.config.ts index 445a87301..56d824cdc 100644 --- a/jest.config.ts +++ b/jest.config.ts @@ -17,6 +17,7 @@ const config: JestConfigWithTsJest = { '/deno/', '/deno_tests/', ], + testPathIgnorePatterns: ['scripts'], }; export default config; diff --git a/package.json b/package.json index c67a2fb77..f94e0590b 100644 --- a/package.json +++ b/package.json @@ -14,7 +14,7 @@ ], "private": false, "scripts": { - "test": "bin/check-test-server && yarn jest", + "test": "./scripts/test", "build": "bash ./build", "prepack": "echo 'to pack, run yarn build && (cd dist; yarn pack)' && exit 1", "prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1", diff --git a/scripts/mock b/scripts/mock new file mode 100755 index 000000000..61c6988a3 --- /dev/null +++ b/scripts/mock @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +if [ -z "$1" ]; then + URL="$1" + shift +else + URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)" +fi + +# Check if the URL is empty +if [ -z "$URL" ]; then + echo "Error: No OpenAPI spec path/url provided or found in .stats.yml" + exit 1 +fi + +# Run prism mock on the given spec +if [ "$1" == "--daemon" ]; then + npm exec prism mock "$URL" &> .prism.log & + + # Wait for server to come online + while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do + echo -n "." + sleep 0.1 + done + + if grep -q "✖ fatal" ".prism.log"; then + cat .prism.log + exit 1 + fi + + echo +else + npm exec prism mock "$URL" +fi diff --git a/scripts/test b/scripts/test new file mode 100755 index 000000000..f01384e68 --- /dev/null +++ b/scripts/test @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +function prism_is_running() { + curl --silent "http://localhost:4010/" >/dev/null 2>&1 +} + +kill_server_on_port() { + pids=$(lsof -t -i tcp:"$1" || echo "") + if [ "$pids" != "" ]; then + kill "$pids" + echo "Stopped $pids." + fi +} + +if ! 
prism_is_running; then + # When we exit this script, make sure to kill the background mock server process + trap 'kill_server_on_port 4010' EXIT + + # Start the dev server + ./scripts/mock --daemon + + # Sanity check and print a nice error message + if ! ./bin/check-test-server; then + exit + fi +fi + +# Run tests +./node_modules/.bin/jest From 11d3f0d8a4ff883e9267d9e9b390c2fea27d718f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 29 Apr 2024 13:01:36 -0400 Subject: [PATCH 086/533] feat(api): add required tool_choice (#803) --- src/resources/beta/threads/runs/runs.ts | 25 ++++++++++++--------- src/resources/beta/threads/threads.ts | 22 ++++++++++-------- src/resources/chat/completions.ts | 30 ++++++++++++------------- 3 files changed, 43 insertions(+), 34 deletions(-) diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index a15565450..18095886a 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -445,8 +445,9 @@ export interface Run { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -713,8 +714,9 @@ export interface RunCreateParamsBase { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. 
`auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -964,8 +966,9 @@ export interface RunCreateAndPollParams { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -1168,8 +1171,9 @@ export interface RunCreateAndStreamParams { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. 
*/ @@ -1372,8 +1376,9 @@ export interface RunStreamParams { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 81ba31dba..b8b3ff2be 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -169,12 +169,13 @@ export interface AssistantToolChoiceFunction { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. 
*/ -export type AssistantToolChoiceOption = 'none' | 'auto' | AssistantToolChoice; +export type AssistantToolChoiceOption = 'none' | 'auto' | 'required' | AssistantToolChoice; /** * Represents a thread that contains @@ -551,8 +552,9 @@ export interface ThreadCreateAndRunParamsBase { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -900,8 +902,9 @@ export interface ThreadCreateAndRunPollParams { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. `auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ @@ -1228,8 +1231,9 @@ export interface ThreadCreateAndRunStreamParams { /** * Controls which (if any) tool is called by the model. `none` means the model will * not call any tools and instead generates a message. 
`auto` is the default value - * and means the model can pick between generating a message or calling a tool. - * Specifying a particular tool like `{"type": "file_search"}` or + * and means the model can pick between generating a message or calling one or more + * tools. `required` means the model must call one or more tools before responding + * to the user. Specifying a particular tool like `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. */ diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index b9672f52b..467b33619 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -598,17 +598,17 @@ export interface ChatCompletionTool { } /** - * Controls which (if any) function is called by the model. `none` means the model - * will not call a function and instead generates a message. `auto` means the model - * can pick between generating a message or calling a function. Specifying a - * particular function via + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tool and instead generates a message. `auto` means the model can + * pick between generating a message or calling one or more tools. `required` means + * the model must call one or more tools. Specifying a particular tool via * `{"type": "function", "function": {"name": "my_function"}}` forces the model to - * call that function. + * call that tool. * - * `none` is the default when no functions are present. `auto` is the default if - * functions are present. + * `none` is the default when no tools are present. `auto` is the default if tools + * are present. 
*/ -export type ChatCompletionToolChoiceOption = 'none' | 'auto' | ChatCompletionNamedToolChoice; +export type ChatCompletionToolChoiceOption = 'none' | 'auto' | 'required' | ChatCompletionNamedToolChoice; export interface ChatCompletionToolMessageParam { /** @@ -796,15 +796,15 @@ export interface ChatCompletionCreateParamsBase { temperature?: number | null; /** - * Controls which (if any) function is called by the model. `none` means the model - * will not call a function and instead generates a message. `auto` means the model - * can pick between generating a message or calling a function. Specifying a - * particular function via + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tool and instead generates a message. `auto` means the model can + * pick between generating a message or calling one or more tools. `required` means + * the model must call one or more tools. Specifying a particular tool via * `{"type": "function", "function": {"name": "my_function"}}` forces the model to - * call that function. + * call that tool. * - * `none` is the default when no functions are present. `auto` is the default if - * functions are present. + * `none` is the default when no tools are present. `auto` is the default if tools + * are present. 
*/ tool_choice?: ChatCompletionToolChoiceOption; From 3dabda8c13deb31899cc2aef76fefe4572d09481 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 29 Apr 2024 13:01:58 -0400 Subject: [PATCH 087/533] release: 4.39.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- build-deno | 2 +- package.json | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f4b74e8b3..922d48efa 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.38.5" + ".": "4.39.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c69b6ecc..f75ad31ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.39.0 (2024-04-29) + +Full Changelog: [v4.38.5...v4.39.0](https://github.com/openai/openai-node/compare/v4.38.5...v4.39.0) + +### Features + +* **api:** add required tool_choice ([#803](https://github.com/openai/openai-node/issues/803)) ([99693e6](https://github.com/openai/openai-node/commit/99693e61debc67327a45dffb2c10c113341bffd6)) + + +### Chores + +* **internal:** add scripts/test and scripts/mock ([#801](https://github.com/openai/openai-node/issues/801)) ([6656105](https://github.com/openai/openai-node/commit/6656105fa1346a91d17e2b7a5e075f3091310c2f)) + ## 4.38.5 (2024-04-24) Full Changelog: [v4.38.4...v4.38.5](https://github.com/openai/openai-node/compare/v4.38.4...v4.38.5) diff --git a/README.md b/README.md index ec0d5a03c..abad61be4 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.38.5/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.39.0/mod.ts'; ``` diff --git a/build-deno b/build-deno index ec2c3f8a5..bbe4888f7 100755 --- a/build-deno +++ b/build-deno @@ -14,7 +14,7 @@ This is a build produced from 
https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.38.5/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.39.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/package.json b/package.json index f94e0590b..9232d8f7b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.38.5", + "version": "4.39.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index bc0a54ad1..b54588e20 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.38.5'; // x-release-please-version +export const VERSION = '4.39.0'; // x-release-please-version From 928351928054feb56f8797587c70f74d06c2737c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 30 Apr 2024 11:36:38 -0400 Subject: [PATCH 088/533] chore(internal): refactor scripts (#806) --- bin/check-test-server | 50 ------------------- package.json | 6 +-- release-please-config.json | 2 +- scripts/bootstrap | 9 ++++ build => scripts/build | 15 +++--- build-deno => scripts/build-deno | 4 +- scripts/git-publish-deno.sh | 5 +- scripts/lint | 7 +++ scripts/mock | 10 ++-- scripts/test | 36 ++++++++++--- .../{ => utils}/check-is-in-git-install.sh | 0 scripts/{ => utils}/check-version.cjs | 4 +- scripts/{ => utils}/denoify.ts | 4 +- scripts/{ => utils}/fix-index-exports.cjs | 2 +- .../{ => utils}/make-dist-package-json.cjs | 2 +- scripts/{ => utils}/postprocess-files.cjs | 6 +-- 16 files changed, 82 insertions(+), 80 deletions(-) delete mode 100755 bin/check-test-server create mode 100755 scripts/bootstrap rename build => scripts/build (84%) rename build-deno => scripts/build-deno (93%) create mode 100755 scripts/lint rename scripts/{ => utils}/check-is-in-git-install.sh (100%) rename scripts/{ => 
utils}/check-version.cjs (82%) rename scripts/{ => utils}/denoify.ts (98%) rename scripts/{ => utils}/fix-index-exports.cjs (86%) rename scripts/{ => utils}/make-dist-package-json.cjs (87%) rename scripts/{ => utils}/postprocess-files.cjs (97%) diff --git a/bin/check-test-server b/bin/check-test-server deleted file mode 100755 index a6fa34950..000000000 --- a/bin/check-test-server +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -NC='\033[0m' # No Color - -function prism_is_running() { - curl --silent "http://localhost:4010/" >/dev/null 2>&1 -} - -function is_overriding_api_base_url() { - [ -n "$TEST_API_BASE_URL" ] -} - -if is_overriding_api_base_url ; then - # If someone is running the tests against the live API, we can trust they know - # what they're doing and exit early. - echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" - - exit 0 -elif prism_is_running ; then - echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" - echo - - exit 0 -else - echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" - echo -e "running against your OpenAPI spec." - echo - echo -e "${YELLOW}To fix:${NC}" - echo - echo -e "1. Install Prism (requires Node 16+):" - echo - echo -e " With npm:" - echo -e " \$ ${YELLOW}npm install -g @stoplight/prism-cli${NC}" - echo - echo -e " With yarn:" - echo -e " \$ ${YELLOW}yarn global add @stoplight/prism-cli${NC}" - echo - echo -e "2. 
Run the mock server" - echo - echo -e " To run the server, pass in the path of your OpenAPI" - echo -e " spec to the prism command:" - echo - echo -e " \$ ${YELLOW}prism mock path/to/your.openapi.yml${NC}" - echo - - exit 1 -fi diff --git a/package.json b/package.json index 9232d8f7b..35124ee7a 100644 --- a/package.json +++ b/package.json @@ -15,13 +15,13 @@ "private": false, "scripts": { "test": "./scripts/test", - "build": "bash ./build", + "build": "./scripts/build", "prepack": "echo 'to pack, run yarn build && (cd dist; yarn pack)' && exit 1", "prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1", "format": "prettier --write --cache --cache-strategy metadata . !dist", - "prepare": "if ./scripts/check-is-in-git-install.sh; then npm run build; fi", + "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build; fi", "tsn": "ts-node -r tsconfig-paths/register", - "lint": "eslint --ext ts,js .", + "lint": "./scripts/lint", "fix": "eslint --fix --ext ts,js ." }, "dependencies": { diff --git a/release-please-config.json b/release-please-config.json index 37def5fe1..586d57e0b 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -63,6 +63,6 @@ "extra-files": [ "src/version.ts", "README.md", - "build-deno" + "./scripts/build-deno" ] } diff --git a/scripts/bootstrap b/scripts/bootstrap new file mode 100755 index 000000000..6752d0e6b --- /dev/null +++ b/scripts/bootstrap @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +PACKAGE_MANAGER=$(command -v yarn >/dev/null 2>&1 && echo "yarn" || echo "npm") + +$PACKAGE_MANAGER install diff --git a/build b/scripts/build similarity index 84% rename from build rename to scripts/build index 85d5b4bc8..aa7c61f02 100755 --- a/build +++ b/scripts/build @@ -1,7 +1,10 @@ #!/usr/bin/env bash + set -exuo pipefail -node scripts/check-version.cjs +cd "$(dirname "$0")/.." 
+ +node scripts/utils/check-version.cjs # Build into dist and will publish the package from there, # so that src/resources/foo.ts becomes /resources/foo.js @@ -22,7 +25,7 @@ if [ -e "bin/cli" ]; then fi # this converts the export map paths for the dist directory # and does a few other minor things -node scripts/make-dist-package-json.cjs > dist/package.json +node scripts/utils/make-dist-package-json.cjs > dist/package.json # build to .js/.mjs/.d.ts files npm exec tsc-multi @@ -32,7 +35,7 @@ cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto # we need to add exports = module.exports = OpenAI Node to index.js; # No way to get that from index.ts because it would cause compile errors # when building .mjs -node scripts/fix-index-exports.cjs +node scripts/utils/fix-index-exports.cjs # with "moduleResolution": "nodenext", if ESM resolves to index.d.ts, # it'll have TS errors on the default import. But if it resolves to # index.d.mts the default import will work (even though both files have @@ -40,14 +43,14 @@ node scripts/fix-index-exports.cjs cp dist/index.d.ts dist/index.d.mts cp tsconfig.dist-src.json dist/src/tsconfig.json -node scripts/postprocess-files.cjs +node scripts/utils/postprocess-files.cjs # make sure that nothing crashes when we require the output CJS or # import the output ESM (cd dist && node -e 'require("openai")') (cd dist && node -e 'import("openai")' --input-type=module) -if command -v deno &> /dev/null && [ -e ./build-deno ] +if command -v deno &> /dev/null && [ -e ./scripts/build-deno ] then - ./build-deno + ./scripts/build-deno fi diff --git a/build-deno b/scripts/build-deno similarity index 93% rename from build-deno rename to scripts/build-deno index bbe4888f7..28002850b 100755 --- a/build-deno +++ b/scripts/build-deno @@ -2,6 +2,8 @@ set -exuo pipefail +cd "$(dirname "$0")/.." 
+ rm -rf deno; mkdir deno cp -rp src/* deno @@ -37,7 +39,7 @@ done for file in LICENSE CHANGELOG.md; do if [ -e "${file}" ]; then cp "${file}" deno; fi done -npm exec ts-node -T -- scripts/denoify.ts +npm exec ts-node -T -- scripts/utils/denoify.ts deno fmt deno deno check deno/mod.ts if [ -e deno_tests ]; then diff --git a/scripts/git-publish-deno.sh b/scripts/git-publish-deno.sh index 4098994f3..701db735e 100755 --- a/scripts/git-publish-deno.sh +++ b/scripts/git-publish-deno.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash + set -exuo pipefail +cd "$(dirname "$0")/.." + # This script pushes the contents of the `deno` directory to the `deno` branch, # and creates a `vx.x.x-deno` tag, so that Deno users can # import OpenAI from "https://raw.githubusercontent.com/openai/openai-node/vx.x.x-deno/mod.ts" @@ -38,7 +41,7 @@ else : "${DENO_PUSH_RELEASE_TAG:="v$DENO_PUSH_VERSION"}" fi -if [ ! -e deno ]; then ./build; fi +if [ ! -e deno ]; then ./scripts/build; fi # We want to commit and push a branch where everything inside the deno # directory is at root level in the branch. diff --git a/scripts/lint b/scripts/lint new file mode 100755 index 000000000..4f05d6609 --- /dev/null +++ b/scripts/lint @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +./node_modules/.bin/eslint --ext ts,js . diff --git a/scripts/mock b/scripts/mock index 61c6988a3..2bba22723 100755 --- a/scripts/mock +++ b/scripts/mock @@ -1,6 +1,10 @@ #!/usr/bin/env bash -if [ -z "$1" ]; then +set -e + +cd "$(dirname "$0")/.." + +if [ -n "$1" ]; then URL="$1" shift else @@ -15,7 +19,7 @@ fi # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec prism mock "$URL" &> .prism.log & + npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL" &> .prism.log & # Wait for server to come online while ! 
grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do @@ -30,5 +34,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec prism mock "$URL" + npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL" fi diff --git a/scripts/test b/scripts/test index f01384e68..48b637a40 100755 --- a/scripts/test +++ b/scripts/test @@ -1,5 +1,14 @@ #!/usr/bin/env bash +set -e + +cd "$(dirname "$0")/.." + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + function prism_is_running() { curl --silent "http://localhost:4010/" >/dev/null 2>&1 } @@ -12,17 +21,32 @@ kill_server_on_port() { fi } -if ! prism_is_running; then +function is_overriding_api_base_url() { + [ -n "$TEST_API_BASE_URL" ] +} + +if ! is_overriding_api_base_url && ! prism_is_running ; then # When we exit this script, make sure to kill the background mock server process trap 'kill_server_on_port 4010' EXIT # Start the dev server - ./scripts/mock --daemon + ./scripts/mock --daemon &> /dev/null +fi - # Sanity check and print a nice error message - if ! ./bin/check-test-server; then - exit - fi +if ! prism_is_running ; then + echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" + echo -e "running against your OpenAPI spec." 
+ echo + echo -e "To run the server, pass in the path or url of your OpenAPI" + echo -e "spec to the prism command:" + echo + echo -e " \$ ${YELLOW}npm exec prism mock path/to/your.openapi.yml${NC}" + echo + + exit 1 +else + echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" + echo fi # Run tests diff --git a/scripts/check-is-in-git-install.sh b/scripts/utils/check-is-in-git-install.sh similarity index 100% rename from scripts/check-is-in-git-install.sh rename to scripts/utils/check-is-in-git-install.sh diff --git a/scripts/check-version.cjs b/scripts/utils/check-version.cjs similarity index 82% rename from scripts/check-version.cjs rename to scripts/utils/check-version.cjs index 50a85669e..86c56dfd3 100644 --- a/scripts/check-version.cjs +++ b/scripts/utils/check-version.cjs @@ -2,14 +2,14 @@ const fs = require('fs'); const path = require('path'); const main = () => { - const pkg = require('../package.json'); + const pkg = require('../../package.json'); const version = pkg['version']; if (!version) throw 'The version property is not set in the package.json file'; if (typeof version !== 'string') { throw `Unexpected type for the package.json version field; got ${typeof version}, expected string`; } - const versionFile = path.resolve(__dirname, '..', 'src', 'version.ts'); + const versionFile = path.resolve(__dirname, '..', '..', 'src', 'version.ts'); const contents = fs.readFileSync(versionFile, 'utf8'); const output = contents.replace(/(export const VERSION = ')(.*)(')/g, `$1${version}$3`); fs.writeFileSync(versionFile, output); diff --git a/scripts/denoify.ts b/scripts/utils/denoify.ts similarity index 98% rename from scripts/denoify.ts rename to scripts/utils/denoify.ts index 9922b7bf8..742bc069f 100644 --- a/scripts/denoify.ts +++ b/scripts/utils/denoify.ts @@ -1,9 +1,9 @@ import path from 'path'; import * as tm from 'ts-morph'; -import { name as pkgName } from '../package.json'; +import { name as pkgName } from '../../package.json'; 
import fs from 'fs'; -const rootDir = path.resolve(__dirname, '..'); +const rootDir = path.resolve(__dirname, '../..'); const denoDir = path.join(rootDir, 'deno'); const tsConfigFilePath = path.join(rootDir, 'tsconfig.deno.json'); diff --git a/scripts/fix-index-exports.cjs b/scripts/utils/fix-index-exports.cjs similarity index 86% rename from scripts/fix-index-exports.cjs rename to scripts/utils/fix-index-exports.cjs index b61b2ea33..72b0b8fd0 100644 --- a/scripts/fix-index-exports.cjs +++ b/scripts/utils/fix-index-exports.cjs @@ -4,7 +4,7 @@ const path = require('path'); const indexJs = process.env['DIST_PATH'] ? path.resolve(process.env['DIST_PATH'], 'index.js') - : path.resolve(__dirname, '..', 'dist', 'index.js'); + : path.resolve(__dirname, '..', '..', 'dist', 'index.js'); let before = fs.readFileSync(indexJs, 'utf8'); let after = before.replace( diff --git a/scripts/make-dist-package-json.cjs b/scripts/utils/make-dist-package-json.cjs similarity index 87% rename from scripts/make-dist-package-json.cjs rename to scripts/utils/make-dist-package-json.cjs index d4a0a69b3..7c24f56e2 100644 --- a/scripts/make-dist-package-json.cjs +++ b/scripts/utils/make-dist-package-json.cjs @@ -1,4 +1,4 @@ -const pkgJson = require(process.env['PKG_JSON_PATH'] || '../package.json'); +const pkgJson = require(process.env['PKG_JSON_PATH'] || '../../package.json'); function processExportMap(m) { for (const key in m) { diff --git a/scripts/postprocess-files.cjs b/scripts/utils/postprocess-files.cjs similarity index 97% rename from scripts/postprocess-files.cjs rename to scripts/utils/postprocess-files.cjs index 8fd9ec8db..c46a46d07 100644 --- a/scripts/postprocess-files.cjs +++ b/scripts/utils/postprocess-files.cjs @@ -2,12 +2,12 @@ const fs = require('fs'); const path = require('path'); const { parse } = require('@typescript-eslint/parser'); -const pkgImportPath = process.env['PKG_IMPORT_PATH'] ?? 'openai/' +const pkgImportPath = process.env['PKG_IMPORT_PATH'] ?? 
'openai/'; const distDir = process.env['DIST_PATH'] ? path.resolve(process.env['DIST_PATH']) - : path.resolve(__dirname, '..', 'dist'); + : path.resolve(__dirname, '..', '..', 'dist'); const distSrcDir = path.join(distDir, 'src'); /** @@ -103,7 +103,7 @@ async function* walk(dir) { } async function postprocess() { - for await (const file of walk(path.resolve(__dirname, '..', 'dist'))) { + for await (const file of walk(path.resolve(__dirname, '..', '..', 'dist'))) { if (!/\.([cm]?js|(\.d)?[cm]?ts)$/.test(file)) continue; const code = await fs.promises.readFile(file, 'utf8'); From ecc2eaec602eb9fe518f011920d8500e01fde01b Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:31:37 -0400 Subject: [PATCH 089/533] chore(internal): fix release please for deno (#808) --- release-please-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release-please-config.json b/release-please-config.json index 586d57e0b..0a9347796 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -63,6 +63,6 @@ "extra-files": [ "src/version.ts", "README.md", - "./scripts/build-deno" + "scripts/build-deno" ] } From 61b5b83e82dd723e9584232f3b805ed13e58e13d Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 30 Apr 2024 13:05:06 -0400 Subject: [PATCH 090/533] chore(internal): add link to openapi spec (#810) --- .stats.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.stats.yml b/.stats.yml index c9a9bfa4a..e904583da 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1 +1,2 @@ configured_endpoints: 63 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0839c14b2b61dad4e830884410cfc3695546682ced009e50583c8bb5c44512d7.yml From 3aaff04b1cdee987ad99261283684ca8b91990cd Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 30 Apr 2024 13:05:27 -0400 
Subject: [PATCH 091/533] release: 4.39.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 10 ++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 922d48efa..2781849c1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.39.0" + ".": "4.39.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f75ad31ff..b7b3548fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog +## 4.39.1 (2024-04-30) + +Full Changelog: [v4.39.0...v4.39.1](https://github.com/openai/openai-node/compare/v4.39.0...v4.39.1) + +### Chores + +* **internal:** add link to openapi spec ([#810](https://github.com/openai/openai-node/issues/810)) ([61b5b83](https://github.com/openai/openai-node/commit/61b5b83e82dd723e9584232f3b805ed13e58e13d)) +* **internal:** fix release please for deno ([#808](https://github.com/openai/openai-node/issues/808)) ([ecc2eae](https://github.com/openai/openai-node/commit/ecc2eaec602eb9fe518f011920d8500e01fde01b)) +* **internal:** refactor scripts ([#806](https://github.com/openai/openai-node/issues/806)) ([9283519](https://github.com/openai/openai-node/commit/928351928054feb56f8797587c70f74d06c2737c)) + ## 4.39.0 (2024-04-29) Full Changelog: [v4.38.5...v4.39.0](https://github.com/openai/openai-node/compare/v4.38.5...v4.39.0) diff --git a/README.md b/README.md index abad61be4..4805a65af 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.39.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.39.1/mod.ts'; ``` diff --git a/package.json b/package.json index 35124ee7a..3576a12de 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.39.0", + "version": "4.39.1", 
"description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 28002850b..cffc84e8a 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.39.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.39.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index b54588e20..6e3099397 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.39.0'; // x-release-please-version +export const VERSION = '4.39.1'; // x-release-please-version From 585bdd7371ed9494c686591435e809a77e806dda Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 30 Apr 2024 23:59:44 -0400 Subject: [PATCH 092/533] feat(api): delete messages (#811) --- .github/workflows/ci.yml | 19 ++++++++++++++++++- .gitignore | 1 + .stats.yml | 4 ++-- Brewfile | 1 + api.md | 1 + scripts/bootstrap | 9 +++++++++ scripts/mock | 5 ++++- scripts/test | 10 +++++++--- src/resources/batches.ts | 6 +++--- src/resources/beta/threads/messages.ts | 10 ++++++++++ src/resources/fine-tuning/jobs/jobs.ts | 6 ++++++ .../beta/threads/messages.test.ts | 18 ++++++++++++++++++ 12 files changed, 80 insertions(+), 10 deletions(-) create mode 100644 Brewfile diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6c83025f..d2a8037a3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,5 +28,22 @@ jobs: - name: Check types run: | yarn build + test: + name: test + runs-on: ubuntu-latest + if: github.repository == 'openai/openai-node' + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '18' + + - name: Bootstrap + 
run: ./scripts/bootstrap + + - name: Run tests + run: ./scripts/test - diff --git a/.gitignore b/.gitignore index 31b12ac63..733d72ecf 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ node_modules yarn-error.log codegen.log +Brewfile.lock.json dist /deno /*.tgz diff --git a/.stats.yml b/.stats.yml index e904583da..9797002bf 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 63 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0839c14b2b61dad4e830884410cfc3695546682ced009e50583c8bb5c44512d7.yml +configured_endpoints: 64 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-97c9a5f089049dc9eb5cee9475558049003e37e42202cab39e59d75e08b4c613.yml diff --git a/Brewfile b/Brewfile new file mode 100644 index 000000000..e4feee601 --- /dev/null +++ b/Brewfile @@ -0,0 +1 @@ +brew "node" diff --git a/api.md b/api.md index e7a8d3a31..c1ac8cfbd 100644 --- a/api.md +++ b/api.md @@ -365,6 +365,7 @@ Methods: - client.beta.threads.messages.retrieve(threadId, messageId) -> Message - client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message - client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage +- client.beta.threads.messages.del(threadId, messageId) -> MessageDeleted # Batches diff --git a/scripts/bootstrap b/scripts/bootstrap index 6752d0e6b..05dd47a61 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,6 +4,15 @@ set -e cd "$(dirname "$0")/.." +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then + brew bundle check >/dev/null 2>&1 || { + echo "==> Installing Homebrew dependencies…" + brew bundle + } +fi + +echo "==> Installing Node dependencies…" + PACKAGE_MANAGER=$(command -v yarn >/dev/null 2>&1 && echo "yarn" || echo "npm") $PACKAGE_MANAGER install diff --git a/scripts/mock b/scripts/mock index 2bba22723..5a8c35b72 100755 --- a/scripts/mock +++ b/scripts/mock @@ -4,7 +4,7 @@ set -e cd "$(dirname "$0")/.." 
-if [ -n "$1" ]; then +if [[ -n "$1" && "$1" != '--'* ]]; then URL="$1" shift else @@ -17,11 +17,14 @@ if [ -z "$URL" ]; then exit 1 fi +echo "==> Starting mock server with URL ${URL}" + # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL" &> .prism.log & # Wait for server to come online + echo -n "Waiting for server" while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do echo -n "." sleep 0.1 diff --git a/scripts/test b/scripts/test index 48b637a40..aa94b72d8 100755 --- a/scripts/test +++ b/scripts/test @@ -30,17 +30,20 @@ if ! is_overriding_api_base_url && ! prism_is_running ; then trap 'kill_server_on_port 4010' EXIT # Start the dev server - ./scripts/mock --daemon &> /dev/null + ./scripts/mock --daemon fi -if ! prism_is_running ; then +if is_overriding_api_base_url ; then + echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" + echo +elif ! prism_is_running ; then echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" echo -e "running against your OpenAPI spec." echo echo -e "To run the server, pass in the path or url of your OpenAPI" echo -e "spec to the prism command:" echo - echo -e " \$ ${YELLOW}npm exec prism mock path/to/your.openapi.yml${NC}" + echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}" echo exit 1 @@ -50,4 +53,5 @@ else fi # Run tests +echo "==> Running tests" ./node_modules/.bin/jest diff --git a/src/resources/batches.ts b/src/resources/batches.ts index fb0470dcd..2f6af03e6 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -214,10 +214,10 @@ export interface BatchCreateParams { completion_window: '24h'; /** - * The endpoint to be used for all requests in the batch. Currently only - * `/v1/chat/completions` is supported. + * The endpoint to be used for all requests in the batch. 
Currently + * `/v1/chat/completions` and `/v1/embeddings` are supported. */ - endpoint: '/v1/chat/completions'; + endpoint: '/v1/chat/completions' | '/v1/embeddings'; /** * The ID of an uploaded file that contains requests for the new batch. diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 559395ca5..8ce714f58 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -72,6 +72,16 @@ export class Messages extends APIResource { headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } + + /** + * Deletes a message. + */ + del(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/threads/${threadId}/messages/${messageId}`, { + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } } export class MessagesPage extends CursorPage {} diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 2469cce07..874d30047 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -180,6 +180,12 @@ export interface FineTuningJob { */ validation_file: string | null; + /** + * The Unix timestamp (in seconds) for when the fine-tuning job is estimated to + * finish. The value will be null if the fine-tuning job is not running. + */ + estimated_finish?: number | null; + /** * A list of integrations to enable for this fine-tuning job. 
*/ diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index eb1e78133..262ff178d 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -99,4 +99,22 @@ describe('resource messages', () => { ), ).rejects.toThrow(OpenAI.NotFoundError); }); + + test('del', async () => { + const responsePromise = openai.beta.threads.messages.del('string', 'string'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.beta.threads.messages.del('string', 'string', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); }); From 4fdc6fbe064740c3926bfeaeb91219d57e32577e Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 1 May 2024 00:00:04 -0400 Subject: [PATCH 093/533] release: 4.40.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2781849c1..d645df0f2 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.39.1" + ".": "4.40.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b7b3548fd..dc79b248c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 
@@ # Changelog +## 4.40.0 (2024-05-01) + +Full Changelog: [v4.39.1...v4.40.0](https://github.com/openai/openai-node/compare/v4.39.1...v4.40.0) + +### Features + +* **api:** delete messages ([#811](https://github.com/openai/openai-node/issues/811)) ([9e37dbd](https://github.com/openai/openai-node/commit/9e37dbd554e4ca48fda1577b1aad612e9d30534d)) + ## 4.39.1 (2024-04-30) Full Changelog: [v4.39.0...v4.39.1](https://github.com/openai/openai-node/compare/v4.39.0...v4.39.1) diff --git a/README.md b/README.md index 4805a65af..7aba97c47 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.39.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.40.0/mod.ts'; ``` diff --git a/package.json b/package.json index 3576a12de..6038fdfea 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.39.1", + "version": "4.40.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index cffc84e8a..3a9c87b7c 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.39.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.40.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 6e3099397..7bb1bfd37 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.39.1'; // x-release-please-version +export const VERSION = '4.40.0'; // x-release-please-version From 81a6c28c4773a0245ce9c505fc5b98d43df21beb Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 1 May 2024 17:31:45 -0400 Subject: [PATCH 094/533] 
chore(internal): bump prism version (#813) --- scripts/mock | 4 ++-- scripts/test | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/mock b/scripts/mock index 5a8c35b72..fe89a1d08 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock "$URL" + npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" fi diff --git a/scripts/test b/scripts/test index aa94b72d8..b62a7cccd 100755 --- a/scripts/test +++ b/scripts/test @@ -54,4 +54,4 @@ fi # Run tests echo "==> Running tests" -./node_modules/.bin/jest +./node_modules/.bin/jest "$@" From d0b915a7514eda5b23d7d1e4420d1d1485ed8d0f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 2 May 2024 04:16:28 -0400 Subject: [PATCH 095/533] chore(internal): move client class to separate file (#815) --- src/client.ts | 292 +++++++++++++++++++++++++++++++++++++++++++++++++ src/index.ts | 296 +------------------------------------------------- 2 files changed, 298 insertions(+), 290 deletions(-) create mode 100644 src/client.ts diff --git a/src/client.ts b/src/client.ts new file mode 100644 index 000000000..493dcbf82 --- /dev/null +++ b/src/client.ts @@ -0,0 +1,292 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import * as Core from './core'; +import * as Errors from './error'; +import { type Agent } from './_shims/index'; +import * as Uploads from './uploads'; +import * as Pagination from 'openai/pagination'; +import * as API from 'openai/resources/index'; + +export interface ClientOptions { + /** + * Defaults to process.env['OPENAI_API_KEY']. + */ + apiKey?: string | undefined; + + /** + * Defaults to process.env['OPENAI_ORG_ID']. + */ + organization?: string | null | undefined; + + /** + * Defaults to process.env['OPENAI_PROJECT_ID']. + */ + project?: string | null | undefined; + + /** + * Override the default base URL for the API, e.g., "https://api.example.com/v2/" + * + * Defaults to process.env['OPENAI_BASE_URL']. + */ + baseURL?: string | null | undefined; + + /** + * The maximum amount of time (in milliseconds) that the client should wait for a response + * from the server before timing out a single request. + * + * Note that request timeouts are retried by default, so in a worst-case scenario you may wait + * much longer than this timeout before the promise succeeds or fails. + */ + timeout?: number; + + /** + * An HTTP agent used to manage HTTP(S) connections. + * + * If not provided, an agent will be constructed by default in the Node.js environment, + * otherwise no agent is used. + */ + httpAgent?: Agent; + + /** + * Specify a custom `fetch` function implementation. + * + * If not provided, we use `node-fetch` on Node.js and otherwise expect that `fetch` is + * defined globally. + */ + fetch?: Core.Fetch | undefined; + + /** + * The maximum number of times that the client will retry a request in case of a + * temporary failure, like a network error or a 5XX error from the server. + * + * @default 2 + */ + maxRetries?: number; + + /** + * Default headers to include with every request to the API. + * + * These can be removed in individual requests by explicitly setting the + * header to `undefined` or `null` in request options. 
+ */ + defaultHeaders?: Core.Headers; + + /** + * Default query parameters to include with every request to the API. + * + * These can be removed in individual requests by explicitly setting the + * param to `undefined` in request options. + */ + defaultQuery?: Core.DefaultQuery; + + /** + * By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. + * Only set this option to `true` if you understand the risks and have appropriate mitigations in place. + */ + dangerouslyAllowBrowser?: boolean; +} + +/** API Client for interfacing with the OpenAI API. */ +export class OpenAI extends Core.APIClient { + apiKey: string; + organization: string | null; + project: string | null; + + private _options: ClientOptions; + + /** + * API Client for interfacing with the OpenAI API. + * + * @param {string | undefined} [opts.apiKey=process.env['OPENAI_API_KEY'] ?? undefined] + * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null] + * @param {string | null | undefined} [opts.project=process.env['OPENAI_PROJECT_ID'] ?? null] + * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL'] ?? https://api.openai.com/v1] - Override the default base URL for the API. + * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. + * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections. + * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation. + * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request. + * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API. + * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API. 
+ * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. + */ + constructor({ + baseURL = Core.readEnv('OPENAI_BASE_URL'), + apiKey = Core.readEnv('OPENAI_API_KEY'), + organization = Core.readEnv('OPENAI_ORG_ID') ?? null, + project = Core.readEnv('OPENAI_PROJECT_ID') ?? null, + ...opts + }: ClientOptions = {}) { + if (apiKey === undefined) { + throw new Errors.OpenAIError( + "The OPENAI_API_KEY environment variable is missing or empty; either provide it, or instantiate the OpenAI client with an apiKey option, like new OpenAI({ apiKey: 'My API Key' }).", + ); + } + + const options: ClientOptions = { + apiKey, + organization, + project, + ...opts, + baseURL: baseURL || `https://api.openai.com/v1`, + }; + + if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) { + throw new Errors.OpenAIError( + "It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew OpenAI({ apiKey, dangerouslyAllowBrowser: true });\n\nhttps://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety\n", + ); + } + + super({ + baseURL: options.baseURL!, + timeout: options.timeout ?? 
600000 /* 10 minutes */, + httpAgent: options.httpAgent, + maxRetries: options.maxRetries, + fetch: options.fetch, + }); + this._options = options; + + this.apiKey = apiKey; + this.organization = organization; + this.project = project; + } + + completions: API.Completions = new API.Completions(this); + chat: API.Chat = new API.Chat(this); + embeddings: API.Embeddings = new API.Embeddings(this); + files: API.Files = new API.Files(this); + images: API.Images = new API.Images(this); + audio: API.Audio = new API.Audio(this); + moderations: API.Moderations = new API.Moderations(this); + models: API.Models = new API.Models(this); + fineTuning: API.FineTuning = new API.FineTuning(this); + beta: API.Beta = new API.Beta(this); + batches: API.Batches = new API.Batches(this); + + protected override defaultQuery(): Core.DefaultQuery | undefined { + return this._options.defaultQuery; + } + + protected override defaultHeaders(opts: Core.FinalRequestOptions): Core.Headers { + return { + ...super.defaultHeaders(opts), + 'OpenAI-Organization': this.organization, + 'OpenAI-Project': this.project, + ...this._options.defaultHeaders, + }; + } + + protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers { + return { Authorization: `Bearer ${this.apiKey}` }; + } + + static OpenAI = this; + + static OpenAIError = Errors.OpenAIError; + static APIError = Errors.APIError; + static APIConnectionError = Errors.APIConnectionError; + static APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; + static APIUserAbortError = Errors.APIUserAbortError; + static NotFoundError = Errors.NotFoundError; + static ConflictError = Errors.ConflictError; + static RateLimitError = Errors.RateLimitError; + static BadRequestError = Errors.BadRequestError; + static AuthenticationError = Errors.AuthenticationError; + static InternalServerError = Errors.InternalServerError; + static PermissionDeniedError = Errors.PermissionDeniedError; + static UnprocessableEntityError = 
Errors.UnprocessableEntityError; + + static toFile = Uploads.toFile; + static fileFromPath = Uploads.fileFromPath; +} + +export namespace OpenAI { + export import RequestOptions = Core.RequestOptions; + + export import Page = Pagination.Page; + export import PageResponse = Pagination.PageResponse; + + export import CursorPage = Pagination.CursorPage; + export import CursorPageParams = Pagination.CursorPageParams; + export import CursorPageResponse = Pagination.CursorPageResponse; + + export import Completions = API.Completions; + export import Completion = API.Completion; + export import CompletionChoice = API.CompletionChoice; + export import CompletionUsage = API.CompletionUsage; + export import CompletionCreateParams = API.CompletionCreateParams; + export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; + export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; + + export import Chat = API.Chat; + export import ChatModel = API.ChatModel; + export import ChatCompletion = API.ChatCompletion; + export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; + export import ChatCompletionChunk = API.ChatCompletionChunk; + export import ChatCompletionContentPart = API.ChatCompletionContentPart; + export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; + export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; + export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; + export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam; + export import ChatCompletionMessage = API.ChatCompletionMessage; + export import ChatCompletionMessageParam = API.ChatCompletionMessageParam; + export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; + export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; + export import ChatCompletionRole 
= API.ChatCompletionRole; + export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam; + export import ChatCompletionTokenLogprob = API.ChatCompletionTokenLogprob; + export import ChatCompletionTool = API.ChatCompletionTool; + export import ChatCompletionToolChoiceOption = API.ChatCompletionToolChoiceOption; + export import ChatCompletionToolMessageParam = API.ChatCompletionToolMessageParam; + export import ChatCompletionUserMessageParam = API.ChatCompletionUserMessageParam; + export import ChatCompletionCreateParams = API.ChatCompletionCreateParams; + export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; + export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; + + export import Embeddings = API.Embeddings; + export import CreateEmbeddingResponse = API.CreateEmbeddingResponse; + export import Embedding = API.Embedding; + export import EmbeddingCreateParams = API.EmbeddingCreateParams; + + export import Files = API.Files; + export import FileContent = API.FileContent; + export import FileDeleted = API.FileDeleted; + export import FileObject = API.FileObject; + export import FileObjectsPage = API.FileObjectsPage; + export import FileCreateParams = API.FileCreateParams; + export import FileListParams = API.FileListParams; + + export import Images = API.Images; + export import Image = API.Image; + export import ImagesResponse = API.ImagesResponse; + export import ImageCreateVariationParams = API.ImageCreateVariationParams; + export import ImageEditParams = API.ImageEditParams; + export import ImageGenerateParams = API.ImageGenerateParams; + + export import Audio = API.Audio; + + export import Moderations = API.Moderations; + export import Moderation = API.Moderation; + export import ModerationCreateResponse = API.ModerationCreateResponse; + export import ModerationCreateParams = API.ModerationCreateParams; + + export import Models = API.Models; + export import 
Model = API.Model; + export import ModelDeleted = API.ModelDeleted; + export import ModelsPage = API.ModelsPage; + + export import FineTuning = API.FineTuning; + + export import Beta = API.Beta; + + export import Batches = API.Batches; + export import Batch = API.Batch; + export import BatchError = API.BatchError; + export import BatchRequestCounts = API.BatchRequestCounts; + export import BatchesPage = API.BatchesPage; + export import BatchCreateParams = API.BatchCreateParams; + export import BatchListParams = API.BatchListParams; + + export import ErrorObject = API.ErrorObject; + export import FunctionDefinition = API.FunctionDefinition; + export import FunctionParameters = API.FunctionParameters; +} diff --git a/src/index.ts b/src/index.ts index 1741a4816..61989a318 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,203 +1,14 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from './core'; import * as Errors from './error'; -import { type Agent } from './_shims/index'; import * as Uploads from './uploads'; -import * as Pagination from 'openai/pagination'; -import * as API from 'openai/resources/index'; +import { OpenAI } from './client'; -export interface ClientOptions { - /** - * Defaults to process.env['OPENAI_API_KEY']. - */ - apiKey?: string | undefined; - - /** - * Defaults to process.env['OPENAI_ORG_ID']. - */ - organization?: string | null | undefined; - - /** - * Defaults to process.env['OPENAI_PROJECT_ID']. - */ - project?: string | null | undefined; - - /** - * Override the default base URL for the API, e.g., "https://api.example.com/v2/" - * - * Defaults to process.env['OPENAI_BASE_URL']. - */ - baseURL?: string | null | undefined; - - /** - * The maximum amount of time (in milliseconds) that the client should wait for a response - * from the server before timing out a single request. 
- * - * Note that request timeouts are retried by default, so in a worst-case scenario you may wait - * much longer than this timeout before the promise succeeds or fails. - */ - timeout?: number; - - /** - * An HTTP agent used to manage HTTP(S) connections. - * - * If not provided, an agent will be constructed by default in the Node.js environment, - * otherwise no agent is used. - */ - httpAgent?: Agent; - - /** - * Specify a custom `fetch` function implementation. - * - * If not provided, we use `node-fetch` on Node.js and otherwise expect that `fetch` is - * defined globally. - */ - fetch?: Core.Fetch | undefined; - - /** - * The maximum number of times that the client will retry a request in case of a - * temporary failure, like a network error or a 5XX error from the server. - * - * @default 2 - */ - maxRetries?: number; - - /** - * Default headers to include with every request to the API. - * - * These can be removed in individual requests by explicitly setting the - * header to `undefined` or `null` in request options. - */ - defaultHeaders?: Core.Headers; - - /** - * Default query parameters to include with every request to the API. - * - * These can be removed in individual requests by explicitly setting the - * param to `undefined` in request options. - */ - defaultQuery?: Core.DefaultQuery; - - /** - * By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. - * Only set this option to `true` if you understand the risks and have appropriate mitigations in place. - */ - dangerouslyAllowBrowser?: boolean; -} - -/** API Client for interfacing with the OpenAI API. */ -export class OpenAI extends Core.APIClient { - apiKey: string; - organization: string | null; - project: string | null; - - private _options: ClientOptions; - - /** - * API Client for interfacing with the OpenAI API. - * - * @param {string | undefined} [opts.apiKey=process.env['OPENAI_API_KEY'] ?? 
undefined] - * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null] - * @param {string | null | undefined} [opts.project=process.env['OPENAI_PROJECT_ID'] ?? null] - * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL'] ?? https://api.openai.com/v1] - Override the default base URL for the API. - * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. - * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections. - * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation. - * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request. - * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API. - * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API. - * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. - */ - constructor({ - baseURL = Core.readEnv('OPENAI_BASE_URL'), - apiKey = Core.readEnv('OPENAI_API_KEY'), - organization = Core.readEnv('OPENAI_ORG_ID') ?? null, - project = Core.readEnv('OPENAI_PROJECT_ID') ?? 
null, - ...opts - }: ClientOptions = {}) { - if (apiKey === undefined) { - throw new Errors.OpenAIError( - "The OPENAI_API_KEY environment variable is missing or empty; either provide it, or instantiate the OpenAI client with an apiKey option, like new OpenAI({ apiKey: 'My API Key' }).", - ); - } - - const options: ClientOptions = { - apiKey, - organization, - project, - ...opts, - baseURL: baseURL || `https://api.openai.com/v1`, - }; - - if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) { - throw new Errors.OpenAIError( - "It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew OpenAI({ apiKey, dangerouslyAllowBrowser: true });\n\nhttps://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety\n", - ); - } - - super({ - baseURL: options.baseURL!, - timeout: options.timeout ?? 
600000 /* 10 minutes */, - httpAgent: options.httpAgent, - maxRetries: options.maxRetries, - fetch: options.fetch, - }); - this._options = options; - - this.apiKey = apiKey; - this.organization = organization; - this.project = project; - } - - completions: API.Completions = new API.Completions(this); - chat: API.Chat = new API.Chat(this); - embeddings: API.Embeddings = new API.Embeddings(this); - files: API.Files = new API.Files(this); - images: API.Images = new API.Images(this); - audio: API.Audio = new API.Audio(this); - moderations: API.Moderations = new API.Moderations(this); - models: API.Models = new API.Models(this); - fineTuning: API.FineTuning = new API.FineTuning(this); - beta: API.Beta = new API.Beta(this); - batches: API.Batches = new API.Batches(this); - - protected override defaultQuery(): Core.DefaultQuery | undefined { - return this._options.defaultQuery; - } - - protected override defaultHeaders(opts: Core.FinalRequestOptions): Core.Headers { - return { - ...super.defaultHeaders(opts), - 'OpenAI-Organization': this.organization, - 'OpenAI-Project': this.project, - ...this._options.defaultHeaders, - }; - } - - protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers { - return { Authorization: `Bearer ${this.apiKey}` }; - } - - static OpenAI = this; - - static OpenAIError = Errors.OpenAIError; - static APIError = Errors.APIError; - static APIConnectionError = Errors.APIConnectionError; - static APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; - static APIUserAbortError = Errors.APIUserAbortError; - static NotFoundError = Errors.NotFoundError; - static ConflictError = Errors.ConflictError; - static RateLimitError = Errors.RateLimitError; - static BadRequestError = Errors.BadRequestError; - static AuthenticationError = Errors.AuthenticationError; - static InternalServerError = Errors.InternalServerError; - static PermissionDeniedError = Errors.PermissionDeniedError; - static UnprocessableEntityError = 
Errors.UnprocessableEntityError; +export { OpenAI }; +export default OpenAI; - static toFile = Uploads.toFile; - static fileFromPath = Uploads.fileFromPath; -} +export import toFile = Uploads.toFile; +export import fileFromPath = Uploads.fileFromPath; export const { OpenAIError, @@ -215,99 +26,4 @@ export const { UnprocessableEntityError, } = Errors; -export import toFile = Uploads.toFile; -export import fileFromPath = Uploads.fileFromPath; - -export namespace OpenAI { - export import RequestOptions = Core.RequestOptions; - - export import Page = Pagination.Page; - export import PageResponse = Pagination.PageResponse; - - export import CursorPage = Pagination.CursorPage; - export import CursorPageParams = Pagination.CursorPageParams; - export import CursorPageResponse = Pagination.CursorPageResponse; - - export import Completions = API.Completions; - export import Completion = API.Completion; - export import CompletionChoice = API.CompletionChoice; - export import CompletionUsage = API.CompletionUsage; - export import CompletionCreateParams = API.CompletionCreateParams; - export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; - export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; - - export import Chat = API.Chat; - export import ChatModel = API.ChatModel; - export import ChatCompletion = API.ChatCompletion; - export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; - export import ChatCompletionChunk = API.ChatCompletionChunk; - export import ChatCompletionContentPart = API.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; - export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam; - export import 
ChatCompletionMessage = API.ChatCompletionMessage; - export import ChatCompletionMessageParam = API.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; - export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = API.ChatCompletionRole; - export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = API.ChatCompletionTokenLogprob; - export import ChatCompletionTool = API.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = API.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = API.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = API.ChatCompletionUserMessageParam; - export import ChatCompletionCreateParams = API.ChatCompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; - - export import Embeddings = API.Embeddings; - export import CreateEmbeddingResponse = API.CreateEmbeddingResponse; - export import Embedding = API.Embedding; - export import EmbeddingCreateParams = API.EmbeddingCreateParams; - - export import Files = API.Files; - export import FileContent = API.FileContent; - export import FileDeleted = API.FileDeleted; - export import FileObject = API.FileObject; - export import FileObjectsPage = API.FileObjectsPage; - export import FileCreateParams = API.FileCreateParams; - export import FileListParams = API.FileListParams; - - export import Images = API.Images; - export import Image = API.Image; - export import ImagesResponse = API.ImagesResponse; - export import ImageCreateVariationParams = API.ImageCreateVariationParams; - export import ImageEditParams = API.ImageEditParams; - export import ImageGenerateParams = 
API.ImageGenerateParams; - - export import Audio = API.Audio; - - export import Moderations = API.Moderations; - export import Moderation = API.Moderation; - export import ModerationCreateResponse = API.ModerationCreateResponse; - export import ModerationCreateParams = API.ModerationCreateParams; - - export import Models = API.Models; - export import Model = API.Model; - export import ModelDeleted = API.ModelDeleted; - export import ModelsPage = API.ModelsPage; - - export import FineTuning = API.FineTuning; - - export import Beta = API.Beta; - - export import Batches = API.Batches; - export import Batch = API.Batch; - export import BatchError = API.BatchError; - export import BatchRequestCounts = API.BatchRequestCounts; - export import BatchesPage = API.BatchesPage; - export import BatchCreateParams = API.BatchCreateParams; - export import BatchListParams = API.BatchListParams; - - export import ErrorObject = API.ErrorObject; - export import FunctionDefinition = API.FunctionDefinition; - export import FunctionParameters = API.FunctionParameters; -} - -export default OpenAI; +export * from './client'; From ab08790b98575b51c705b1ce09ac96b50870fbfb Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 2 May 2024 04:16:49 -0400 Subject: [PATCH 096/533] release: 4.40.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d645df0f2..0493e5bac 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.40.0" + ".": "4.40.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index dc79b248c..6f5a077cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.40.1 (2024-05-02) + +Full Changelog: 
[v4.40.0...v4.40.1](https://github.com/openai/openai-node/compare/v4.40.0...v4.40.1) + +### Chores + +* **internal:** bump prism version ([#813](https://github.com/openai/openai-node/issues/813)) ([81a6c28](https://github.com/openai/openai-node/commit/81a6c28c4773a0245ce9c505fc5b98d43df21beb)) +* **internal:** move client class to separate file ([#815](https://github.com/openai/openai-node/issues/815)) ([d0b915a](https://github.com/openai/openai-node/commit/d0b915a7514eda5b23d7d1e4420d1d1485ed8d0f)) + ## 4.40.0 (2024-05-01) Full Changelog: [v4.39.1...v4.40.0](https://github.com/openai/openai-node/compare/v4.39.1...v4.40.0) diff --git a/README.md b/README.md index 7aba97c47..e6d723367 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.40.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.40.1/mod.ts'; ``` diff --git a/package.json b/package.json index 6038fdfea..416dd8d26 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.40.0", + "version": "4.40.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 3a9c87b7c..67e7626eb 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.40.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.40.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 7bb1bfd37..0dde5072f 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.40.0'; // x-release-please-version +export const VERSION = '4.40.1'; // x-release-please-version From df516b7b349a4f5af218ef358483839e20f700a5 Mon Sep 17 
00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 3 May 2024 06:17:24 -0400 Subject: [PATCH 097/533] fix(vectorStores): correct uploadAndPoll method (#817) --- src/resources/beta/vector-stores/files.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index a18211221..f8f8cddc5 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -164,7 +164,7 @@ export class Files extends APIResource { file: Uploadable, options?: Core.RequestOptions & { pollIntervalMs?: number }, ): Promise { - const fileInfo = await this._client.files.create({ file: file, purpose: 'assistants' }, options); + const fileInfo = await this.upload(vectorStoreId, file, options); return await this.poll(vectorStoreId, fileInfo.id, options); } } From 01c40e7dd71f52e98cf55088ae160a57bfc67569 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 3 May 2024 09:16:05 -0400 Subject: [PATCH 098/533] fix(package): revert recent client file change (#819) this subtly broke some setups that we didn't have tests for --- ecosystem-tests/cli.ts | 4 + ecosystem-tests/node-js/package-lock.json | 244 ++++++++++++++++++ ecosystem-tests/node-js/package.json | 14 + ecosystem-tests/node-js/test.js | 8 + src/client.ts | 292 --------------------- src/index.ts | 296 +++++++++++++++++++++- 6 files changed, 560 insertions(+), 298 deletions(-) create mode 100644 ecosystem-tests/node-js/package-lock.json create mode 100644 ecosystem-tests/node-js/package.json create mode 100644 ecosystem-tests/node-js/test.js delete mode 100644 src/client.ts diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index a3c1f27a4..e315ccd6c 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -25,6 +25,10 @@ const projectRunners = { 'node-ts-esm': defaultNodeRunner, 
'node-ts-esm-web': defaultNodeRunner, 'node-ts-esm-auto': defaultNodeRunner, + 'node-js': async () => { + await installPackage(); + await run('node', ['test.js']); + }, 'ts-browser-webpack': async () => { await installPackage(); diff --git a/ecosystem-tests/node-js/package-lock.json b/ecosystem-tests/node-js/package-lock.json new file mode 100644 index 000000000..bb59ccb92 --- /dev/null +++ b/ecosystem-tests/node-js/package-lock.json @@ -0,0 +1,244 @@ +{ + "name": "node-js", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "foo", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "openai": "^4.40.1" + } + }, + "node_modules/@types/node": { + "version": "18.19.31", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.31.tgz", + "integrity": "sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/node-fetch": { + "version": "2.6.11", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz", + "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.0" + } + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, + "node_modules/agentkeepalive": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", + "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", + "dependencies": { + 
"humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/form-data-encoder": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", + "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==" + }, + 
"node_modules/formdata-node": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", + "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", + "dependencies": { + "node-domexception": "1.0.0", + "web-streams-polyfill": "4.0.0-beta.3" + }, + "engines": { + "node": ">= 12.20" + } + }, + "node_modules/formdata-node/node_modules/web-streams-polyfill": { + "version": "4.0.0-beta.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", + "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", + "engines": { + "node": ">= 14" + } + }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "dependencies": { + "ms": "^2.0.0" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/node-domexception": { + "version": 
"1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/openai": { + "version": "4.40.1", + "resolved": "https://registry.npmjs.org/openai/-/openai-4.40.1.tgz", + "integrity": "sha512-mS7LerF4fY1/we0aKGGwIWtosTJFLKuNbBWMBR/G1TAZUHoktAdod0dqIrlQvSD39uS6jNEEbT7jRsXmzfEPBw==", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7", + "web-streams-polyfill": "^3.2.1" + }, + "bin": { + "openai": "bin/cli" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + 
}, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + } + } +} diff --git a/ecosystem-tests/node-js/package.json b/ecosystem-tests/node-js/package.json new file mode 100644 index 000000000..63f858014 --- /dev/null +++ b/ecosystem-tests/node-js/package.json @@ -0,0 +1,14 @@ +{ + "name": "node-js", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "", + "license": "ISC", + "dependencies": { + "openai": "^4.40.1" + } +} diff --git a/ecosystem-tests/node-js/test.js b/ecosystem-tests/node-js/test.js new file mode 100644 index 000000000..7f9f21736 --- /dev/null +++ b/ecosystem-tests/node-js/test.js @@ -0,0 +1,8 @@ +const openaiKey = "a valid OpenAI key" +const OpenAI = require('openai'); + +console.log(OpenAI) + +const openai = new OpenAI({ + apiKey: openaiKey, +}); diff --git a/src/client.ts b/src/client.ts deleted file mode 100644 index 493dcbf82..000000000 --- a/src/client.ts +++ /dev/null @@ -1,292 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import * as Core from './core'; -import * as Errors from './error'; -import { type Agent } from './_shims/index'; -import * as Uploads from './uploads'; -import * as Pagination from 'openai/pagination'; -import * as API from 'openai/resources/index'; - -export interface ClientOptions { - /** - * Defaults to process.env['OPENAI_API_KEY']. - */ - apiKey?: string | undefined; - - /** - * Defaults to process.env['OPENAI_ORG_ID']. - */ - organization?: string | null | undefined; - - /** - * Defaults to process.env['OPENAI_PROJECT_ID']. - */ - project?: string | null | undefined; - - /** - * Override the default base URL for the API, e.g., "https://api.example.com/v2/" - * - * Defaults to process.env['OPENAI_BASE_URL']. - */ - baseURL?: string | null | undefined; - - /** - * The maximum amount of time (in milliseconds) that the client should wait for a response - * from the server before timing out a single request. - * - * Note that request timeouts are retried by default, so in a worst-case scenario you may wait - * much longer than this timeout before the promise succeeds or fails. - */ - timeout?: number; - - /** - * An HTTP agent used to manage HTTP(S) connections. - * - * If not provided, an agent will be constructed by default in the Node.js environment, - * otherwise no agent is used. - */ - httpAgent?: Agent; - - /** - * Specify a custom `fetch` function implementation. - * - * If not provided, we use `node-fetch` on Node.js and otherwise expect that `fetch` is - * defined globally. - */ - fetch?: Core.Fetch | undefined; - - /** - * The maximum number of times that the client will retry a request in case of a - * temporary failure, like a network error or a 5XX error from the server. - * - * @default 2 - */ - maxRetries?: number; - - /** - * Default headers to include with every request to the API. - * - * These can be removed in individual requests by explicitly setting the - * header to `undefined` or `null` in request options. 
- */ - defaultHeaders?: Core.Headers; - - /** - * Default query parameters to include with every request to the API. - * - * These can be removed in individual requests by explicitly setting the - * param to `undefined` in request options. - */ - defaultQuery?: Core.DefaultQuery; - - /** - * By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. - * Only set this option to `true` if you understand the risks and have appropriate mitigations in place. - */ - dangerouslyAllowBrowser?: boolean; -} - -/** API Client for interfacing with the OpenAI API. */ -export class OpenAI extends Core.APIClient { - apiKey: string; - organization: string | null; - project: string | null; - - private _options: ClientOptions; - - /** - * API Client for interfacing with the OpenAI API. - * - * @param {string | undefined} [opts.apiKey=process.env['OPENAI_API_KEY'] ?? undefined] - * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null] - * @param {string | null | undefined} [opts.project=process.env['OPENAI_PROJECT_ID'] ?? null] - * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL'] ?? https://api.openai.com/v1] - Override the default base URL for the API. - * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. - * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections. - * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation. - * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request. - * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API. - * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API. 
- * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. - */ - constructor({ - baseURL = Core.readEnv('OPENAI_BASE_URL'), - apiKey = Core.readEnv('OPENAI_API_KEY'), - organization = Core.readEnv('OPENAI_ORG_ID') ?? null, - project = Core.readEnv('OPENAI_PROJECT_ID') ?? null, - ...opts - }: ClientOptions = {}) { - if (apiKey === undefined) { - throw new Errors.OpenAIError( - "The OPENAI_API_KEY environment variable is missing or empty; either provide it, or instantiate the OpenAI client with an apiKey option, like new OpenAI({ apiKey: 'My API Key' }).", - ); - } - - const options: ClientOptions = { - apiKey, - organization, - project, - ...opts, - baseURL: baseURL || `https://api.openai.com/v1`, - }; - - if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) { - throw new Errors.OpenAIError( - "It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew OpenAI({ apiKey, dangerouslyAllowBrowser: true });\n\nhttps://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety\n", - ); - } - - super({ - baseURL: options.baseURL!, - timeout: options.timeout ?? 
600000 /* 10 minutes */, - httpAgent: options.httpAgent, - maxRetries: options.maxRetries, - fetch: options.fetch, - }); - this._options = options; - - this.apiKey = apiKey; - this.organization = organization; - this.project = project; - } - - completions: API.Completions = new API.Completions(this); - chat: API.Chat = new API.Chat(this); - embeddings: API.Embeddings = new API.Embeddings(this); - files: API.Files = new API.Files(this); - images: API.Images = new API.Images(this); - audio: API.Audio = new API.Audio(this); - moderations: API.Moderations = new API.Moderations(this); - models: API.Models = new API.Models(this); - fineTuning: API.FineTuning = new API.FineTuning(this); - beta: API.Beta = new API.Beta(this); - batches: API.Batches = new API.Batches(this); - - protected override defaultQuery(): Core.DefaultQuery | undefined { - return this._options.defaultQuery; - } - - protected override defaultHeaders(opts: Core.FinalRequestOptions): Core.Headers { - return { - ...super.defaultHeaders(opts), - 'OpenAI-Organization': this.organization, - 'OpenAI-Project': this.project, - ...this._options.defaultHeaders, - }; - } - - protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers { - return { Authorization: `Bearer ${this.apiKey}` }; - } - - static OpenAI = this; - - static OpenAIError = Errors.OpenAIError; - static APIError = Errors.APIError; - static APIConnectionError = Errors.APIConnectionError; - static APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; - static APIUserAbortError = Errors.APIUserAbortError; - static NotFoundError = Errors.NotFoundError; - static ConflictError = Errors.ConflictError; - static RateLimitError = Errors.RateLimitError; - static BadRequestError = Errors.BadRequestError; - static AuthenticationError = Errors.AuthenticationError; - static InternalServerError = Errors.InternalServerError; - static PermissionDeniedError = Errors.PermissionDeniedError; - static UnprocessableEntityError = 
Errors.UnprocessableEntityError; - - static toFile = Uploads.toFile; - static fileFromPath = Uploads.fileFromPath; -} - -export namespace OpenAI { - export import RequestOptions = Core.RequestOptions; - - export import Page = Pagination.Page; - export import PageResponse = Pagination.PageResponse; - - export import CursorPage = Pagination.CursorPage; - export import CursorPageParams = Pagination.CursorPageParams; - export import CursorPageResponse = Pagination.CursorPageResponse; - - export import Completions = API.Completions; - export import Completion = API.Completion; - export import CompletionChoice = API.CompletionChoice; - export import CompletionUsage = API.CompletionUsage; - export import CompletionCreateParams = API.CompletionCreateParams; - export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; - export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; - - export import Chat = API.Chat; - export import ChatModel = API.ChatModel; - export import ChatCompletion = API.ChatCompletion; - export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; - export import ChatCompletionChunk = API.ChatCompletionChunk; - export import ChatCompletionContentPart = API.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; - export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = API.ChatCompletionMessage; - export import ChatCompletionMessageParam = API.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; - export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; - export import ChatCompletionRole 
= API.ChatCompletionRole; - export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = API.ChatCompletionTokenLogprob; - export import ChatCompletionTool = API.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = API.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = API.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = API.ChatCompletionUserMessageParam; - export import ChatCompletionCreateParams = API.ChatCompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; - - export import Embeddings = API.Embeddings; - export import CreateEmbeddingResponse = API.CreateEmbeddingResponse; - export import Embedding = API.Embedding; - export import EmbeddingCreateParams = API.EmbeddingCreateParams; - - export import Files = API.Files; - export import FileContent = API.FileContent; - export import FileDeleted = API.FileDeleted; - export import FileObject = API.FileObject; - export import FileObjectsPage = API.FileObjectsPage; - export import FileCreateParams = API.FileCreateParams; - export import FileListParams = API.FileListParams; - - export import Images = API.Images; - export import Image = API.Image; - export import ImagesResponse = API.ImagesResponse; - export import ImageCreateVariationParams = API.ImageCreateVariationParams; - export import ImageEditParams = API.ImageEditParams; - export import ImageGenerateParams = API.ImageGenerateParams; - - export import Audio = API.Audio; - - export import Moderations = API.Moderations; - export import Moderation = API.Moderation; - export import ModerationCreateResponse = API.ModerationCreateResponse; - export import ModerationCreateParams = API.ModerationCreateParams; - - export import Models = API.Models; - export import 
Model = API.Model; - export import ModelDeleted = API.ModelDeleted; - export import ModelsPage = API.ModelsPage; - - export import FineTuning = API.FineTuning; - - export import Beta = API.Beta; - - export import Batches = API.Batches; - export import Batch = API.Batch; - export import BatchError = API.BatchError; - export import BatchRequestCounts = API.BatchRequestCounts; - export import BatchesPage = API.BatchesPage; - export import BatchCreateParams = API.BatchCreateParams; - export import BatchListParams = API.BatchListParams; - - export import ErrorObject = API.ErrorObject; - export import FunctionDefinition = API.FunctionDefinition; - export import FunctionParameters = API.FunctionParameters; -} diff --git a/src/index.ts b/src/index.ts index 61989a318..1741a4816 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,14 +1,203 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +import * as Core from './core'; import * as Errors from './error'; +import { type Agent } from './_shims/index'; import * as Uploads from './uploads'; -import { OpenAI } from './client'; +import * as Pagination from 'openai/pagination'; +import * as API from 'openai/resources/index'; -export { OpenAI }; -export default OpenAI; +export interface ClientOptions { + /** + * Defaults to process.env['OPENAI_API_KEY']. + */ + apiKey?: string | undefined; -export import toFile = Uploads.toFile; -export import fileFromPath = Uploads.fileFromPath; + /** + * Defaults to process.env['OPENAI_ORG_ID']. + */ + organization?: string | null | undefined; + + /** + * Defaults to process.env['OPENAI_PROJECT_ID']. + */ + project?: string | null | undefined; + + /** + * Override the default base URL for the API, e.g., "https://api.example.com/v2/" + * + * Defaults to process.env['OPENAI_BASE_URL']. 
+ */ + baseURL?: string | null | undefined; + + /** + * The maximum amount of time (in milliseconds) that the client should wait for a response + * from the server before timing out a single request. + * + * Note that request timeouts are retried by default, so in a worst-case scenario you may wait + * much longer than this timeout before the promise succeeds or fails. + */ + timeout?: number; + + /** + * An HTTP agent used to manage HTTP(S) connections. + * + * If not provided, an agent will be constructed by default in the Node.js environment, + * otherwise no agent is used. + */ + httpAgent?: Agent; + + /** + * Specify a custom `fetch` function implementation. + * + * If not provided, we use `node-fetch` on Node.js and otherwise expect that `fetch` is + * defined globally. + */ + fetch?: Core.Fetch | undefined; + + /** + * The maximum number of times that the client will retry a request in case of a + * temporary failure, like a network error or a 5XX error from the server. + * + * @default 2 + */ + maxRetries?: number; + + /** + * Default headers to include with every request to the API. + * + * These can be removed in individual requests by explicitly setting the + * header to `undefined` or `null` in request options. + */ + defaultHeaders?: Core.Headers; + + /** + * Default query parameters to include with every request to the API. + * + * These can be removed in individual requests by explicitly setting the + * param to `undefined` in request options. + */ + defaultQuery?: Core.DefaultQuery; + + /** + * By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. + * Only set this option to `true` if you understand the risks and have appropriate mitigations in place. + */ + dangerouslyAllowBrowser?: boolean; +} + +/** API Client for interfacing with the OpenAI API. 
*/ +export class OpenAI extends Core.APIClient { + apiKey: string; + organization: string | null; + project: string | null; + + private _options: ClientOptions; + + /** + * API Client for interfacing with the OpenAI API. + * + * @param {string | undefined} [opts.apiKey=process.env['OPENAI_API_KEY'] ?? undefined] + * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null] + * @param {string | null | undefined} [opts.project=process.env['OPENAI_PROJECT_ID'] ?? null] + * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL'] ?? https://api.openai.com/v1] - Override the default base URL for the API. + * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. + * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections. + * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation. + * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request. + * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API. + * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API. + * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. + */ + constructor({ + baseURL = Core.readEnv('OPENAI_BASE_URL'), + apiKey = Core.readEnv('OPENAI_API_KEY'), + organization = Core.readEnv('OPENAI_ORG_ID') ?? null, + project = Core.readEnv('OPENAI_PROJECT_ID') ?? 
null, + ...opts + }: ClientOptions = {}) { + if (apiKey === undefined) { + throw new Errors.OpenAIError( + "The OPENAI_API_KEY environment variable is missing or empty; either provide it, or instantiate the OpenAI client with an apiKey option, like new OpenAI({ apiKey: 'My API Key' }).", + ); + } + + const options: ClientOptions = { + apiKey, + organization, + project, + ...opts, + baseURL: baseURL || `https://api.openai.com/v1`, + }; + + if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) { + throw new Errors.OpenAIError( + "It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew OpenAI({ apiKey, dangerouslyAllowBrowser: true });\n\nhttps://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety\n", + ); + } + + super({ + baseURL: options.baseURL!, + timeout: options.timeout ?? 
600000 /* 10 minutes */, + httpAgent: options.httpAgent, + maxRetries: options.maxRetries, + fetch: options.fetch, + }); + this._options = options; + + this.apiKey = apiKey; + this.organization = organization; + this.project = project; + } + + completions: API.Completions = new API.Completions(this); + chat: API.Chat = new API.Chat(this); + embeddings: API.Embeddings = new API.Embeddings(this); + files: API.Files = new API.Files(this); + images: API.Images = new API.Images(this); + audio: API.Audio = new API.Audio(this); + moderations: API.Moderations = new API.Moderations(this); + models: API.Models = new API.Models(this); + fineTuning: API.FineTuning = new API.FineTuning(this); + beta: API.Beta = new API.Beta(this); + batches: API.Batches = new API.Batches(this); + + protected override defaultQuery(): Core.DefaultQuery | undefined { + return this._options.defaultQuery; + } + + protected override defaultHeaders(opts: Core.FinalRequestOptions): Core.Headers { + return { + ...super.defaultHeaders(opts), + 'OpenAI-Organization': this.organization, + 'OpenAI-Project': this.project, + ...this._options.defaultHeaders, + }; + } + + protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers { + return { Authorization: `Bearer ${this.apiKey}` }; + } + + static OpenAI = this; + + static OpenAIError = Errors.OpenAIError; + static APIError = Errors.APIError; + static APIConnectionError = Errors.APIConnectionError; + static APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; + static APIUserAbortError = Errors.APIUserAbortError; + static NotFoundError = Errors.NotFoundError; + static ConflictError = Errors.ConflictError; + static RateLimitError = Errors.RateLimitError; + static BadRequestError = Errors.BadRequestError; + static AuthenticationError = Errors.AuthenticationError; + static InternalServerError = Errors.InternalServerError; + static PermissionDeniedError = Errors.PermissionDeniedError; + static UnprocessableEntityError = 
Errors.UnprocessableEntityError; + + static toFile = Uploads.toFile; + static fileFromPath = Uploads.fileFromPath; +} export const { OpenAIError, @@ -26,4 +215,99 @@ export const { UnprocessableEntityError, } = Errors; -export * from './client'; +export import toFile = Uploads.toFile; +export import fileFromPath = Uploads.fileFromPath; + +export namespace OpenAI { + export import RequestOptions = Core.RequestOptions; + + export import Page = Pagination.Page; + export import PageResponse = Pagination.PageResponse; + + export import CursorPage = Pagination.CursorPage; + export import CursorPageParams = Pagination.CursorPageParams; + export import CursorPageResponse = Pagination.CursorPageResponse; + + export import Completions = API.Completions; + export import Completion = API.Completion; + export import CompletionChoice = API.CompletionChoice; + export import CompletionUsage = API.CompletionUsage; + export import CompletionCreateParams = API.CompletionCreateParams; + export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; + export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; + + export import Chat = API.Chat; + export import ChatModel = API.ChatModel; + export import ChatCompletion = API.ChatCompletion; + export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; + export import ChatCompletionChunk = API.ChatCompletionChunk; + export import ChatCompletionContentPart = API.ChatCompletionContentPart; + export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; + export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; + export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; + export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam; + export import ChatCompletionMessage = API.ChatCompletionMessage; + export import ChatCompletionMessageParam = 
API.ChatCompletionMessageParam; + export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; + export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; + export import ChatCompletionRole = API.ChatCompletionRole; + export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam; + export import ChatCompletionTokenLogprob = API.ChatCompletionTokenLogprob; + export import ChatCompletionTool = API.ChatCompletionTool; + export import ChatCompletionToolChoiceOption = API.ChatCompletionToolChoiceOption; + export import ChatCompletionToolMessageParam = API.ChatCompletionToolMessageParam; + export import ChatCompletionUserMessageParam = API.ChatCompletionUserMessageParam; + export import ChatCompletionCreateParams = API.ChatCompletionCreateParams; + export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; + export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; + + export import Embeddings = API.Embeddings; + export import CreateEmbeddingResponse = API.CreateEmbeddingResponse; + export import Embedding = API.Embedding; + export import EmbeddingCreateParams = API.EmbeddingCreateParams; + + export import Files = API.Files; + export import FileContent = API.FileContent; + export import FileDeleted = API.FileDeleted; + export import FileObject = API.FileObject; + export import FileObjectsPage = API.FileObjectsPage; + export import FileCreateParams = API.FileCreateParams; + export import FileListParams = API.FileListParams; + + export import Images = API.Images; + export import Image = API.Image; + export import ImagesResponse = API.ImagesResponse; + export import ImageCreateVariationParams = API.ImageCreateVariationParams; + export import ImageEditParams = API.ImageEditParams; + export import ImageGenerateParams = API.ImageGenerateParams; + + export import Audio = API.Audio; + + export import Moderations = API.Moderations; + export 
import Moderation = API.Moderation; + export import ModerationCreateResponse = API.ModerationCreateResponse; + export import ModerationCreateParams = API.ModerationCreateParams; + + export import Models = API.Models; + export import Model = API.Model; + export import ModelDeleted = API.ModelDeleted; + export import ModelsPage = API.ModelsPage; + + export import FineTuning = API.FineTuning; + + export import Beta = API.Beta; + + export import Batches = API.Batches; + export import Batch = API.Batch; + export import BatchError = API.BatchError; + export import BatchRequestCounts = API.BatchRequestCounts; + export import BatchesPage = API.BatchesPage; + export import BatchCreateParams = API.BatchCreateParams; + export import BatchListParams = API.BatchListParams; + + export import ErrorObject = API.ErrorObject; + export import FunctionDefinition = API.FunctionDefinition; + export import FunctionParameters = API.FunctionParameters; +} + +export default OpenAI; From 6c9cc820a8c091d9d0b4656fb13256a50f30f9e3 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 3 May 2024 09:16:25 -0400 Subject: [PATCH 099/533] release: 4.40.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0493e5bac..66443f777 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.40.1" + ".": "4.40.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f5a077cc..bb1e22da7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.40.2 (2024-05-03) + +Full Changelog: [v4.40.1...v4.40.2](https://github.com/openai/openai-node/compare/v4.40.1...v4.40.2) + +### Bug Fixes + +* **package:** revert recent client file change 
([#819](https://github.com/openai/openai-node/issues/819)) ([fa722c9](https://github.com/openai/openai-node/commit/fa722c97859e55a0e766332c3a2f0cb3673128a2)) +* **vectorStores:** correct uploadAndPoll method ([#817](https://github.com/openai/openai-node/issues/817)) ([d63f22c](https://github.com/openai/openai-node/commit/d63f22c303761710e6eac7ef883c45e34d223df1)) + ## 4.40.1 (2024-05-02) Full Changelog: [v4.40.0...v4.40.1](https://github.com/openai/openai-node/compare/v4.40.0...v4.40.1) diff --git a/README.md b/README.md index e6d723367..657089070 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.40.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.40.2/mod.ts'; ``` diff --git a/package.json b/package.json index 416dd8d26..82962a2f0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.40.1", + "version": "4.40.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 67e7626eb..fe1712b97 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.40.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.40.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 0dde5072f..14b8c36f9 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.40.1'; // x-release-please-version +export const VERSION = '4.40.2'; // x-release-please-version From 92f90499f0bbee79ba9c8342c8d58dbcaf88bdd1 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Sun, 5 May 2024 10:47:11 +0100 Subject: [PATCH 
100/533] feat(client): add Azure client (#822) --- README.md | 21 +- examples/azure.ts | 32 +- src/index.ts | 185 +++++++++- src/resources/beta/vector-stores/files.ts | 1 + tests/lib/azure.test.ts | 395 ++++++++++++++++++++++ 5 files changed, 604 insertions(+), 30 deletions(-) create mode 100644 tests/lib/azure.test.ts diff --git a/README.md b/README.md index 657089070..f51b20ee2 100644 --- a/README.md +++ b/README.md @@ -361,14 +361,25 @@ Error codes are as followed: | >=500 | `InternalServerError` | | N/A | `APIConnectionError` | -### Azure OpenAI +## Microsoft Azure OpenAI -An example of using this library with Azure OpenAI can be found [here](https://github.com/openai/openai-node/blob/master/examples/azure.ts). +To use this library with [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview), use the `AzureOpenAI` +class instead of the `OpenAI` class. -Please note there are subtle differences in API shape & behavior between the Azure OpenAI API and the OpenAI API, -so using this library with Azure OpenAI may result in incorrect types, which can lead to bugs. +> [!IMPORTANT] +> The Azure API shape differs from the core API shape which means that the static types for responses / params +> won't always be correct. + +```ts +const openai = new AzureOpenAI(); -See [`@azure/openai`](https://www.npmjs.com/package/@azure/openai) for an Azure-specific SDK provided by Microsoft. +const result = await openai.chat.completions.create({ + model: 'gpt-4-1106-preview', + messages: [{ role: 'user', content: 'Say hello!' }], +}); + +console.log(result.choices[0]!.message?.content); +``` ### Retries diff --git a/examples/azure.ts b/examples/azure.ts index a903cfd6e..7f57e45c3 100755 --- a/examples/azure.ts +++ b/examples/azure.ts @@ -1,35 +1,19 @@ #!/usr/bin/env -S npm run tsn -T -import OpenAI from 'openai'; +import { AzureOpenAI } from 'openai'; -// The name of your Azure OpenAI Resource. 
-// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource -const resource = ''; - -// Corresponds to your Model deployment within your OpenAI resource, e.g. my-gpt35-16k-deployment +// Corresponds to your Model deployment within your OpenAI resource, e.g. gpt-4-1106-preview // Navigate to the Azure OpenAI Studio to deploy a model. -const model = ''; - -// https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning -const apiVersion = '2023-06-01-preview'; +const deployment = 'gpt-4-1106-preview'; -const apiKey = process.env['AZURE_OPENAI_API_KEY']; -if (!apiKey) { - throw new Error('The AZURE_OPENAI_API_KEY environment variable is missing or empty.'); -} - -// Azure OpenAI requires a custom baseURL, api-version query param, and api-key header. -const openai = new OpenAI({ - apiKey, - baseURL: `https://${resource}.openai.azure.com/openai/deployments/${model}`, - defaultQuery: { 'api-version': apiVersion }, - defaultHeaders: { 'api-key': apiKey }, -}); +// Make sure to set both AZURE_OPENAI_ENDPOINT with the endpoint of your Azure resource and AZURE_OPENAI_API_KEY with the API key. +// You can find both information in the Azure Portal. +const openai = new AzureOpenAI(); async function main() { console.log('Non-streaming:'); const result = await openai.chat.completions.create({ - model, + model: deployment, messages: [{ role: 'user', content: 'Say hello!' }], }); console.log(result.choices[0]!.message?.content); @@ -37,7 +21,7 @@ async function main() { console.log(); console.log('Streaming:'); const stream = await openai.chat.completions.create({ - model, + model: deployment, messages: [{ role: 'user', content: 'Say hello!' 
}], stream: true, }); diff --git a/src/index.ts b/src/index.ts index 1741a4816..dbade2f86 100644 --- a/src/index.ts +++ b/src/index.ts @@ -2,7 +2,7 @@ import * as Core from './core'; import * as Errors from './error'; -import { type Agent } from './_shims/index'; +import { type Agent, type RequestInit } from './_shims/index'; import * as Uploads from './uploads'; import * as Pagination from 'openai/pagination'; import * as API from 'openai/resources/index'; @@ -310,4 +310,187 @@ export namespace OpenAI { export import FunctionParameters = API.FunctionParameters; } +// ---------------------- Azure ---------------------- + +/** API Client for interfacing with the Azure OpenAI API. */ +export interface AzureClientOptions extends ClientOptions { + /** + * Defaults to process.env['OPENAI_API_VERSION']. + */ + apiVersion?: string | undefined; + + /** + * Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/` + */ + endpoint?: string | undefined; + + /** + * A model deployment, if given, sets the base client URL to include `/deployments/{deployment}`. + * Note: this means you won't be able to use non-deployment endpoints. Not supported with Assistants APIs. + */ + deployment?: string | undefined; + + /** + * Defaults to process.env['AZURE_OPENAI_API_KEY']. + */ + apiKey?: string | undefined; + + /** + * A function that returns an access token for Microsoft Entra (formerly known as Azure Active Directory), + * which will be invoked on every request. + */ + azureADTokenProvider?: (() => string) | undefined; +} + +/** API Client for interfacing with the Azure OpenAI API. */ +export class AzureOpenAI extends OpenAI { + private _azureADTokenProvider: (() => string) | undefined; + apiVersion: string = ''; + /** + * API Client for interfacing with the Azure OpenAI API. + * + * @param {string | undefined} [opts.apiVersion=process.env['OPENAI_API_VERSION'] ?? 
undefined] + * @param {string | undefined} [opts.endpoint=process.env['AZURE_OPENAI_ENDPOINT'] ?? undefined] - Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/` + * @param {string | undefined} [opts.apiKey=process.env['AZURE_OPENAI_API_KEY'] ?? undefined] + * @param {string | undefined} opts.deployment - A model deployment, if given, sets the base client URL to include `/deployments/{deployment}`. + * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null] + * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL']] - Sets the base URL for the API. + * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. + * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections. + * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation. + * @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request. + * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API. + * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API. + * @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. 
+ */ + constructor({ + baseURL = Core.readEnv('OPENAI_BASE_URL'), + apiKey = Core.readEnv('AZURE_OPENAI_API_KEY'), + apiVersion = Core.readEnv('OPENAI_API_VERSION'), + endpoint, + deployment, + azureADTokenProvider, + dangerouslyAllowBrowser, + ...opts + }: AzureClientOptions = {}) { + if (!apiVersion) { + throw new Errors.OpenAIError( + "The OPENAI_API_VERSION environment variable is missing or empty; either provide it, or instantiate the AzureOpenAI client with an apiVersion option, like new AzureOpenAI({ apiVersion: 'My API Version' }).", + ); + } + + if (typeof azureADTokenProvider === 'function') { + dangerouslyAllowBrowser = true; + } + + if (!azureADTokenProvider && !apiKey) { + throw new Errors.OpenAIError( + 'Missing credentials. Please pass one of `apiKey` and `azureADTokenProvider`, or set the `AZURE_OPENAI_API_KEY` environment variable.', + ); + } + + if (azureADTokenProvider && apiKey) { + throw new Errors.OpenAIError( + 'The `apiKey` and `azureADTokenProvider` arguments are mutually exclusive; only one can be passed at a time.', + ); + } + + // define a sentinel value to avoid any typing issues + apiKey ??= API_KEY_SENTINEL; + + opts.defaultQuery = { ...opts.defaultQuery, 'api-version': apiVersion }; + + if (!baseURL) { + if (!endpoint) { + endpoint = process.env['AZURE_OPENAI_ENDPOINT']; + } + + if (!endpoint) { + throw new Errors.OpenAIError( + 'Must provide one of the `baseURL` or `endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable', + ); + } + + if (deployment) { + baseURL = `${endpoint}/openai/deployments/${deployment}`; + } else { + baseURL = `${endpoint}/openai`; + } + } else { + if (endpoint) { + throw new Errors.OpenAIError('baseURL and endpoint are mutually exclusive'); + } + } + + super({ + apiKey, + baseURL, + ...opts, + ...(dangerouslyAllowBrowser !== undefined ? 
{ dangerouslyAllowBrowser } : {}), + }); + + this._azureADTokenProvider = azureADTokenProvider; + this.apiVersion = apiVersion; + } + + override buildRequest(options: Core.FinalRequestOptions): { + req: RequestInit; + url: string; + timeout: number; + } { + if (_deployments_endpoints.has(options.path) && options.method === 'post' && options.body !== undefined) { + if (!Core.isObj(options.body)) { + throw new Error('Expected request body to be an object'); + } + const model = options.body['model']; + delete options.body['model']; + if (model !== undefined && !this.baseURL.includes('/deployments')) { + options.path = `/deployments/${model}${options.path}`; + } + } + return super.buildRequest(options); + } + + private _getAzureADToken(): string | undefined { + if (typeof this._azureADTokenProvider === 'function') { + const token = this._azureADTokenProvider(); + if (!token || typeof token !== 'string') { + throw new Errors.OpenAIError( + `Expected 'azureADTokenProvider' argument to return a string but it returned ${token}`, + ); + } + return token; + } + return undefined; + } + + protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers { + if (opts.headers?.['Authorization'] || opts.headers?.['api-key']) { + return {}; + } + const token = this._getAzureADToken(); + if (token) { + return { Authorization: `Bearer ${token}` }; + } + if (this.apiKey !== API_KEY_SENTINEL) { + return { 'api-key': this.apiKey }; + } + throw new Errors.OpenAIError('Unable to handle auth'); + } +} + +const _deployments_endpoints = new Set([ + '/completions', + '/chat/completions', + '/embeddings', + '/audio/transcriptions', + '/audio/translations', + '/audio/speech', + '/images/generations', +]); + +const API_KEY_SENTINEL = ''; + +// ---------------------- End Azure ---------------------- + export default OpenAI; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index f8f8cddc5..ff5094065 100644 --- 
a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -144,6 +144,7 @@ export class Files extends APIResource { /** * Upload a file to the `files` API and then attach it to the given vector store. + * * Note the file will be asynchronously processed (you can use the alternative * polling helper method to wait for processing to complete). */ diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts new file mode 100644 index 000000000..e2b967903 --- /dev/null +++ b/tests/lib/azure.test.ts @@ -0,0 +1,395 @@ +import { AzureOpenAI } from 'openai'; +import { APIUserAbortError } from 'openai'; +import { Headers } from 'openai/core'; +import defaultFetch, { Response, type RequestInit, type RequestInfo } from 'node-fetch'; + +const apiVersion = '2024-02-15-preview'; + +describe('instantiate azure client', () => { + const env = process.env; + + beforeEach(() => { + jest.resetModules(); + process.env = { ...env }; + + console.warn = jest.fn(); + }); + + afterEach(() => { + process.env = env; + }); + + describe('defaultHeaders', () => { + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/', + defaultHeaders: { 'X-My-Default-Header': '2' }, + apiKey: 'My API Key', + apiVersion, + }); + + test('they are used in the request', () => { + const { req } = client.buildRequest({ path: '/foo', method: 'post' }); + expect((req.headers as Headers)['x-my-default-header']).toEqual('2'); + }); + + test('can ignore `undefined` and leave the default', () => { + const { req } = client.buildRequest({ + path: '/foo', + method: 'post', + headers: { 'X-My-Default-Header': undefined }, + }); + expect((req.headers as Headers)['x-my-default-header']).toEqual('2'); + }); + + test('can be removed with `null`', () => { + const { req } = client.buildRequest({ + path: '/foo', + method: 'post', + headers: { 'X-My-Default-Header': null }, + }); + expect(req.headers as Headers).not.toHaveProperty('x-my-default-header'); + }); + }); + + 
describe('defaultQuery', () => { + test('with null query params given', () => { + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/', + defaultQuery: { apiVersion: 'foo' }, + apiKey: 'My API Key', + apiVersion, + }); + expect(client.buildURL('/foo', null)).toEqual( + `http://localhost:5000/foo?apiVersion=foo&api-version=${apiVersion}`, + ); + }); + + test('multiple default query params', () => { + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/', + defaultQuery: { apiVersion: 'foo', hello: 'world' }, + apiKey: 'My API Key', + apiVersion, + }); + expect(client.buildURL('/foo', null)).toEqual( + `http://localhost:5000/foo?apiVersion=foo&hello=world&api-version=${apiVersion}`, + ); + }); + + test('overriding with `undefined`', () => { + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/', + defaultQuery: { hello: 'world' }, + apiKey: 'My API Key', + apiVersion, + }); + expect(client.buildURL('/foo', { hello: undefined })).toEqual( + `http://localhost:5000/foo?api-version=${apiVersion}`, + ); + }); + }); + + test('custom fetch', async () => { + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/', + apiKey: 'My API Key', + apiVersion, + fetch: (url) => { + return Promise.resolve( + new Response(JSON.stringify({ url, custom: true }), { + headers: { 'Content-Type': 'application/json' }, + }), + ); + }, + }); + + const response = await client.get('/foo'); + expect(response).toEqual({ url: `http://localhost:5000/foo?api-version=${apiVersion}`, custom: true }); + }); + + test('custom signal', async () => { + const client = new AzureOpenAI({ + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', + apiKey: 'My API Key', + apiVersion, + fetch: (...args) => { + return new Promise((resolve, reject) => + setTimeout( + () => + defaultFetch(...args) + .then(resolve) + .catch(reject), + 300, + ), + ); + }, + }); + + const controller = new AbortController(); + setTimeout(() => controller.abort(), 200); + + const spy = jest.spyOn(client, 'request'); + + await expect(client.get('/foo', { signal: controller.signal })).rejects.toThrowError(APIUserAbortError); + expect(spy).toHaveBeenCalledTimes(1); + }); + + describe('baseUrl', () => { + test('trailing slash', () => { + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/custom/path/', + apiKey: 'My API Key', + apiVersion, + }); + expect(client.buildURL('/foo', null)).toEqual( + `http://localhost:5000/custom/path/foo?api-version=${apiVersion}`, + ); + }); + + test('no trailing slash', () => { + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/custom/path', + apiKey: 'My API Key', + apiVersion, + }); + expect(client.buildURL('/foo', null)).toEqual( + `http://localhost:5000/custom/path/foo?api-version=${apiVersion}`, + ); + }); + + afterEach(() => { + process.env['OPENAI_BASE_URL'] = undefined; + }); + + test('explicit option', () => { + const client = new AzureOpenAI({ baseURL: 'https://example.com/', apiKey: 'My API Key', apiVersion }); + expect(client.baseURL).toEqual('https://example.com/'); + }); + + test('env variable', () => { + process.env['OPENAI_BASE_URL'] = 'https://example.com/from_env'; + const client = new AzureOpenAI({ apiKey: 'My API Key', apiVersion }); + expect(client.baseURL).toEqual('https://example.com/from_env'); + }); + + test('empty baseUrl/endpoint env variable', () => { + process.env['OPENAI_BASE_URL'] = ''; // empty + expect(() => new AzureOpenAI({ apiKey: 'My API Key', apiVersion })).toThrow( + /Must provide one of the `baseURL` or `endpoint` arguments, or the 
`AZURE_OPENAI_ENDPOINT` environment variable/, + ); + }); + + test('blank baseUrl/endpoint env variable', () => { + process.env['OPENAI_BASE_URL'] = ' '; // blank + expect(() => new AzureOpenAI({ apiKey: 'My API Key', apiVersion })).toThrow( + /Must provide one of the `baseURL` or `endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable/, + ); + }); + }); + + test('maxRetries option is correctly set', () => { + const client = new AzureOpenAI({ + baseURL: 'https://example.com/', + maxRetries: 4, + apiKey: 'My API Key', + apiVersion, + }); + expect(client.maxRetries).toEqual(4); + + // default + const client2 = new AzureOpenAI({ baseURL: 'https://example.com/', apiKey: 'My API Key', apiVersion }); + expect(client2.maxRetries).toEqual(2); + }); + + test('with environment variable arguments', () => { + // set options via env var + process.env['OPENAI_BASE_URL'] = 'https://example.com/'; + process.env['AZURE_OPENAI_API_KEY'] = 'My API Key'; + process.env['OPENAI_API_VERSION'] = 'My API Version'; + const client = new AzureOpenAI(); + expect(client.baseURL).toBe('https://example.com/'); + expect(client.apiKey).toBe('My API Key'); + expect(client.apiVersion).toBe('My API Version'); + }); + + test('with overriden environment variable arguments', () => { + // set options via env var + process.env['AZURE_OPENAI_API_KEY'] = 'another My API Key'; + process.env['OPENAI_API_VERSION'] = 'another My API Version'; + const client = new AzureOpenAI({ baseURL: 'https://example.com/', apiKey: 'My API Key', apiVersion }); + expect(client.apiKey).toBe('My API Key'); + expect(client.apiVersion).toBe(apiVersion); + }); + + describe('Azure Active Directory (AD)', () => { + test('with azureADTokenProvider', () => { + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/', + azureADTokenProvider: () => 'my token', + apiVersion, + }); + expect(client.buildRequest({ method: 'post', path: 
'https://example.com/' }).req.headers).toHaveProperty( + 'authorization', + 'Bearer my token', + ); + }); + + test('apiKey and azureADTokenProvider cant be combined', () => { + expect( + () => + new AzureOpenAI({ + baseURL: 'http://localhost:5000/', + azureADTokenProvider: () => 'my token', + apiKey: 'My API Key', + apiVersion, + }), + ).toThrow( + /The `apiKey` and `azureADTokenProvider` arguments are mutually exclusive; only one can be passed at a time./, + ); + }); + }); + + test('with endpoint', () => { + const client = new AzureOpenAI({ endpoint: 'https://example.com/', apiKey: 'My API Key', apiVersion }); + expect(client.baseURL).toEqual('https://example.com/openai'); + }); + + test('baseURL and endpoint are mutually exclusive', () => { + expect( + () => + new AzureOpenAI({ + endpoint: 'https://example.com/', + baseURL: 'https://anotherexample.com/', + apiKey: 'My API Key', + apiVersion, + }), + ).toThrow(/baseURL and endpoint are mutually exclusive/); + }); +}); + +describe('azure request building', () => { + const client = new AzureOpenAI({ baseURL: 'https://example.com/', apiKey: 'My API Key', apiVersion }); + + describe('Content-Length', () => { + test('handles multi-byte characters', () => { + const { req } = client.buildRequest({ path: '/foo', method: 'post', body: { value: '—' } }); + expect((req.headers as Record)['content-length']).toEqual('20'); + }); + + test('handles standard characters', () => { + const { req } = client.buildRequest({ path: '/foo', method: 'post', body: { value: 'hello' } }); + expect((req.headers as Record)['content-length']).toEqual('22'); + }); + }); + + describe('custom headers', () => { + test('handles undefined', () => { + const { req } = client.buildRequest({ + path: '/foo', + method: 'post', + body: { value: 'hello' }, + headers: { 'X-Foo': 'baz', 'x-foo': 'bar', 'x-Foo': undefined, 'x-baz': 'bam', 'X-Baz': null }, + }); + expect((req.headers as 
Record)['x-foo']).toEqual('bar'); + expect((req.headers as Record)['x-Foo']).toEqual(undefined); + expect((req.headers as Record)['X-Foo']).toEqual(undefined); + expect((req.headers as Record)['x-baz']).toEqual(undefined); + }); + }); +}); + +describe('retries', () => { + test('retry on timeout', async () => { + let count = 0; + const testFetch = async (url: RequestInfo, { signal }: RequestInit = {}): Promise => { + if (count++ === 0) { + return new Promise( + (resolve, reject) => signal?.addEventListener('abort', () => reject(new Error('timed out'))), + ); + } + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + + const client = new AzureOpenAI({ + baseURL: 'https://example.com/', + apiKey: 'My API Key', + apiVersion, + timeout: 10, + fetch: testFetch, + }); + + expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 }); + expect(count).toEqual(2); + expect( + await client + .request({ path: '/foo', method: 'get' }) + .asResponse() + .then((r) => r.text()), + ).toEqual(JSON.stringify({ a: 1 })); + expect(count).toEqual(3); + }); + + test('retry on 429 with retry-after', async () => { + let count = 0; + const testFetch = async (url: RequestInfo, { signal }: RequestInit = {}): Promise => { + if (count++ === 0) { + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After': '0.1', + }, + }); + } + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + + const client = new AzureOpenAI({ + baseURL: 'https://example.com/', + apiKey: 'My API Key', + apiVersion, + fetch: testFetch, + }); + + expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 }); + expect(count).toEqual(2); + expect( + await client + .request({ path: '/foo', method: 'get' }) + .asResponse() + .then((r) => r.text()), + ).toEqual(JSON.stringify({ a: 1 })); + expect(count).toEqual(3); + }); + + test('retry on 429 with 
retry-after-ms', async () => { + let count = 0; + const testFetch = async (url: RequestInfo, { signal }: RequestInit = {}): Promise => { + if (count++ === 0) { + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After-Ms': '10', + }, + }); + } + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + + const client = new AzureOpenAI({ + baseURL: 'https://example.com/', + apiKey: 'My API Key', + apiVersion, + fetch: testFetch, + }); + + expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 }); + expect(count).toEqual(2); + expect( + await client + .request({ path: '/foo', method: 'get' }) + .asResponse() + .then((r) => r.text()), + ).toEqual(JSON.stringify({ a: 1 })); + expect(count).toEqual(3); + }); +}); From b10242a8debda15c027dc2f9d74a799103a5dd15 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Sun, 5 May 2024 10:47:30 +0100 Subject: [PATCH 101/533] release: 4.41.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 66443f777..5f8b241b2 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.40.2" + ".": "4.41.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index bb1e22da7..6671f0bf4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.41.0 (2024-05-05) + +Full Changelog: [v4.40.2...v4.41.0](https://github.com/openai/openai-node/compare/v4.40.2...v4.41.0) + +### Features + +* **client:** add Azure client ([#822](https://github.com/openai/openai-node/issues/822)) ([92f9049](https://github.com/openai/openai-node/commit/92f90499f0bbee79ba9c8342c8d58dbcaf88bdd1)) + ## 4.40.2 (2024-05-03) Full Changelog: 
[v4.40.1...v4.40.2](https://github.com/openai/openai-node/compare/v4.40.1...v4.40.2) diff --git a/README.md b/README.md index f51b20ee2..d8e0fb0a5 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.40.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.41.0/mod.ts'; ``` diff --git a/package.json b/package.json index 82962a2f0..b698340af 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.40.2", + "version": "4.41.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index fe1712b97..8df5b0651 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.40.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.41.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 14b8c36f9..1ab180911 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.40.2'; // x-release-please-version +export const VERSION = '4.41.0'; // x-release-please-version From 258c191cfbb666eac2493fda76b9e90983559554 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 6 May 2024 15:36:07 +0100 Subject: [PATCH 102/533] fix(azure): update build script (#825) --- scripts/utils/fix-index-exports.cjs | 2 +- src/index.ts | 28 +++++++++++++++++----------- tests/lib/azure.test.ts | 19 ++++++++++++------- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/scripts/utils/fix-index-exports.cjs b/scripts/utils/fix-index-exports.cjs index 72b0b8fd0..ee5cebb85 100644 --- a/scripts/utils/fix-index-exports.cjs +++ 
b/scripts/utils/fix-index-exports.cjs @@ -9,6 +9,6 @@ const indexJs = let before = fs.readFileSync(indexJs, 'utf8'); let after = before.replace( /^\s*exports\.default\s*=\s*(\w+)/m, - 'exports = module.exports = $1;\nexports.default = $1', + 'exports = module.exports = $1;\nmodule.exports.AzureOpenAI = AzureOpenAI;\nexports.default = $1', ); fs.writeFileSync(indexJs, after, 'utf8'); diff --git a/src/index.ts b/src/index.ts index dbade2f86..438a46779 100644 --- a/src/index.ts +++ b/src/index.ts @@ -339,12 +339,12 @@ export interface AzureClientOptions extends ClientOptions { * A function that returns an access token for Microsoft Entra (formerly known as Azure Active Directory), * which will be invoked on every request. */ - azureADTokenProvider?: (() => string) | undefined; + azureADTokenProvider?: (() => Promise) | undefined; } /** API Client for interfacing with the Azure OpenAI API. */ export class AzureOpenAI extends OpenAI { - private _azureADTokenProvider: (() => string) | undefined; + private _azureADTokenProvider: (() => Promise) | undefined; apiVersion: string = ''; /** * API Client for interfacing with the Azure OpenAI API. 
@@ -451,9 +451,9 @@ export class AzureOpenAI extends OpenAI { return super.buildRequest(options); } - private _getAzureADToken(): string | undefined { + private async _getAzureADToken(): Promise { if (typeof this._azureADTokenProvider === 'function') { - const token = this._azureADTokenProvider(); + const token = await this._azureADTokenProvider(); if (!token || typeof token !== 'string') { throw new Errors.OpenAIError( `Expected 'azureADTokenProvider' argument to return a string but it returned ${token}`, @@ -465,17 +465,23 @@ export class AzureOpenAI extends OpenAI { } protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers { + return {}; + } + + protected override async prepareOptions(opts: Core.FinalRequestOptions): Promise { if (opts.headers?.['Authorization'] || opts.headers?.['api-key']) { - return {}; + return super.prepareOptions(opts); } - const token = this._getAzureADToken(); + const token = await this._getAzureADToken(); + opts.headers ??= {}; if (token) { - return { Authorization: `Bearer ${token}` }; - } - if (this.apiKey !== API_KEY_SENTINEL) { - return { 'api-key': this.apiKey }; + opts.headers['Authorization'] = `Bearer ${token}`; + } else if (this.apiKey !== API_KEY_SENTINEL) { + opts.headers['api-key'] = this.apiKey; + } else { + throw new Errors.OpenAIError('Unable to handle auth'); } - throw new Errors.OpenAIError('Unable to handle auth'); + return super.prepareOptions(opts); } } diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index e2b967903..4895273be 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -222,16 +222,21 @@ describe('instantiate azure client', () => { }); describe('Azure Active Directory (AD)', () => { - test('with azureADTokenProvider', () => { + test('with azureADTokenProvider', async () => { + const testFetch = async (url: RequestInfo, { headers }: RequestInit = {}): Promise => { + return new Response(JSON.stringify({ a: 1 }), { headers }); + }; const client = new 
AzureOpenAI({ baseURL: 'http://localhost:5000/', - azureADTokenProvider: () => 'my token', + azureADTokenProvider: async () => 'my token', apiVersion, + fetch: testFetch, }); - expect(client.buildRequest({ method: 'post', path: 'https://example.com/' }).req.headers).toHaveProperty( - 'authorization', - 'Bearer my token', - ); + expect( + (await client.request({ method: 'post', path: 'https://example.com/' }).asResponse()).headers.get( + 'authorization', + ), + ).toEqual('Bearer my token'); }); test('apiKey and azureADTokenProvider cant be combined', () => { @@ -239,7 +244,7 @@ describe('instantiate azure client', () => { () => new AzureOpenAI({ baseURL: 'http://localhost:5000/', - azureADTokenProvider: () => 'my token', + azureADTokenProvider: async () => 'my token', apiKey: 'My API Key', apiVersion, }), From 86c13a776fcb704e184604ea5c2a4b277ed12a4c Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 6 May 2024 15:36:27 +0100 Subject: [PATCH 103/533] release: 4.41.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5f8b241b2..aefe28544 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.41.0" + ".": "4.41.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6671f0bf4..39187ece0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.41.1 (2024-05-06) + +Full Changelog: [v4.41.0...v4.41.1](https://github.com/openai/openai-node/compare/v4.41.0...v4.41.1) + +### Bug Fixes + +* **azure:** update build script ([#825](https://github.com/openai/openai-node/issues/825)) ([8afc6e7](https://github.com/openai/openai-node/commit/8afc6e7b49507b3be0228e93913d51b4c3211add)) + ## 4.41.0 
(2024-05-05) Full Changelog: [v4.40.2...v4.41.0](https://github.com/openai/openai-node/compare/v4.40.2...v4.41.0) diff --git a/README.md b/README.md index d8e0fb0a5..d911ccc63 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.41.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.41.1/mod.ts'; ``` diff --git a/package.json b/package.json index b698340af..86b5bfc11 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.41.0", + "version": "4.41.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 8df5b0651..5216721fc 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.41.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.41.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 1ab180911..afa714f81 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.41.0'; // x-release-please-version +export const VERSION = '4.41.1'; // x-release-please-version From cdc3ad749bcf358670d177cb91c6d2d5893604d8 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 6 May 2024 12:48:48 -0400 Subject: [PATCH 104/533] fix(example): fix fine tuning example (#827) --- examples/fine-tuning-data.jsonl | 20 ++++++++++---------- examples/fine-tuning.ts | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/examples/fine-tuning-data.jsonl b/examples/fine-tuning-data.jsonl index 4eace9252..e9ab2679f 100644 --- a/examples/fine-tuning-data.jsonl +++ 
b/examples/fine-tuning-data.jsonl @@ -1,10 +1,10 @@ -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the tallest mountain in the world?"}, {"role": "assistant", "content": "Everest, of course. Because no other mountain even comes close!"}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who was the first President of the United States?"}, {"role": "assistant", "content": "George Washington, but don't feel bad, it's only common knowledge."}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the boiling point of water?"}, {"role": "assistant", "content": "100°C at sea level. Shocking, isn't it?"}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the largest ocean on Earth?"}, {"role": "assistant", "content": "The Pacific Ocean. Surprised? 
Me neither."}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the smallest planet in our solar system?"}, {"role": "assistant", "content": "Mercury. It's not like it's a big deal or anything."}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who discovered penicillin?"}, {"role": "assistant", "content": "Alexander Fleming. But don't worry, it's not like he's famous for it or anything."}]} -{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "When was the internet invented?"}, {"role": "assistant", "content": "In the 1960s, but hey, who's counting?"}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the tallest mountain in the world?"}, {"role": "assistant", "content": "Everest, of course. 
Because no other mountain even comes close!"}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who was the first President of the United States?"}, {"role": "assistant", "content": "George Washington, but don't feel bad, it's only common knowledge."}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the boiling point of water?"}, {"role": "assistant", "content": "100°C at sea level. Shocking, isn't it?"}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the largest ocean on Earth?"}, {"role": "assistant", "content": "The Pacific Ocean. Surprised? Me neither."}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the smallest planet in our solar system?"}, {"role": "assistant", "content": "Mercury. It's not like it's a big deal or anything."}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who discovered penicillin?"}, {"role": "assistant", "content": "Alexander Fleming. 
But don't worry, it's not like he's famous for it or anything."}]} +{"messages": [{"role": "system", "content": "Marvin is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "When was the internet invented?"}, {"role": "assistant", "content": "In the 1960s, but hey, who's counting?"}]} diff --git a/examples/fine-tuning.ts b/examples/fine-tuning.ts index 379eb8fc4..412fc6ada 100755 --- a/examples/fine-tuning.ts +++ b/examples/fine-tuning.ts @@ -49,7 +49,7 @@ async function main() { const events: Record = {}; - while (fineTune.status == 'running' || fineTune.status == 'created') { + while (fineTune.status == 'running' || fineTune.status == 'queued') { fineTune = await client.fineTuning.jobs.retrieve(fineTune.id); console.log(`${fineTune.status}`); From d9d0f0f64b4fd27b0156523326799d07bd75a8ca Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 6 May 2024 15:06:56 -0400 Subject: [PATCH 105/533] feat(api): add usage metadata when streaming (#829) --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 1 + src/resources/chat/chat.ts | 1 + src/resources/chat/completions.ts | 32 ++++++++++++++++++-- src/resources/chat/index.ts | 1 + src/resources/completions.ts | 6 ++++ tests/api-resources/chat/completions.test.ts | 1 + tests/api-resources/completions.test.ts | 1 + 9 files changed, 43 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9797002bf..49956282b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-97c9a5f089049dc9eb5cee9475558049003e37e42202cab39e59d75e08b4c613.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-edb5af3ade0cd27cf366b0654b90c7a81c43c433e11fc3f6e621e2c779de10d4.yml diff --git a/api.md b/api.md index c1ac8cfbd..18cdd9e62 100644 --- a/api.md +++ b/api.md @@ -41,6 +41,7 @@ Types: - ChatCompletionMessageToolCall 
- ChatCompletionNamedToolChoice - ChatCompletionRole +- ChatCompletionStreamOptions - ChatCompletionSystemMessageParam - ChatCompletionTokenLogprob - ChatCompletionTool diff --git a/src/index.ts b/src/index.ts index 438a46779..b146a7bab 100644 --- a/src/index.ts +++ b/src/index.ts @@ -251,6 +251,7 @@ export namespace OpenAI { export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; export import ChatCompletionRole = API.ChatCompletionRole; + export import ChatCompletionStreamOptions = API.ChatCompletionStreamOptions; export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam; export import ChatCompletionTokenLogprob = API.ChatCompletionTokenLogprob; export import ChatCompletionTool = API.ChatCompletionTool; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index fa681ed64..ff271e5b4 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -45,6 +45,7 @@ export namespace Chat { export import ChatCompletionMessageToolCall = CompletionsAPI.ChatCompletionMessageToolCall; export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice; export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole; + export import ChatCompletionStreamOptions = CompletionsAPI.ChatCompletionStreamOptions; export import ChatCompletionSystemMessageParam = CompletionsAPI.ChatCompletionSystemMessageParam; export import ChatCompletionTokenLogprob = CompletionsAPI.ChatCompletionTokenLogprob; export import ChatCompletionTool = CompletionsAPI.ChatCompletionTool; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 467b33619..1098499b9 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -183,8 +183,9 @@ export interface ChatCompletionChunk { id: string; /** - * A list of chat completion choices. 
Can be more than one if `n` is greater - * than 1. + * A list of chat completion choices. Can contain more than one elements if `n` is + * greater than 1. Can also be empty for the last chunk if you set + * `stream_options: {"include_usage": true}`. */ choices: Array; @@ -210,6 +211,14 @@ export interface ChatCompletionChunk { * backend changes have been made that might impact determinism. */ system_fingerprint?: string; + + /** + * An optional field that will only be present when you set + * `stream_options: {"include_usage": true}` in your request. When present, it + * contains a null value except for the last chunk which contains the token usage + * statistics for the entire request. + */ + usage?: CompletionsAPI.CompletionUsage; } export namespace ChatCompletionChunk { @@ -517,6 +526,19 @@ export namespace ChatCompletionNamedToolChoice { */ export type ChatCompletionRole = 'system' | 'user' | 'assistant' | 'tool' | 'function'; +/** + * Options for streaming response. Only set this when you set `stream: true`. + */ +export interface ChatCompletionStreamOptions { + /** + * If set, an additional chunk will be streamed before the `data: [DONE]` message. + * The `usage` field on this chunk shows the token usage statistics for the entire + * request, and the `choices` field will always be an empty array. All other chunks + * will also include a `usage` field, but with a null value. + */ + include_usage?: boolean; +} + export interface ChatCompletionSystemMessageParam { /** * The contents of the system message. @@ -786,6 +808,11 @@ export interface ChatCompletionCreateParamsBase { */ stream?: boolean | null; + /** + * Options for streaming response. Only set this when you set `stream: true`. + */ + stream_options?: ChatCompletionStreamOptions | null; + /** * What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will * make the output more random, while lower values like 0.2 will make it more @@ -949,6 +976,7 @@ export namespace Completions { export import ChatCompletionMessageToolCall = ChatCompletionsAPI.ChatCompletionMessageToolCall; export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice; export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole; + export import ChatCompletionStreamOptions = ChatCompletionsAPI.ChatCompletionStreamOptions; export import ChatCompletionSystemMessageParam = ChatCompletionsAPI.ChatCompletionSystemMessageParam; export import ChatCompletionTokenLogprob = ChatCompletionsAPI.ChatCompletionTokenLogprob; export import ChatCompletionTool = ChatCompletionsAPI.ChatCompletionTool; diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index ef72bbbc9..2761385c2 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -14,6 +14,7 @@ export { ChatCompletionMessageToolCall, ChatCompletionNamedToolChoice, ChatCompletionRole, + ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, ChatCompletionTool, diff --git a/src/resources/completions.ts b/src/resources/completions.ts index b64c3a166..c37c6d802 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -4,6 +4,7 @@ import * as Core from 'openai/core'; import { APIPromise } from 'openai/core'; import { APIResource } from 'openai/resource'; import * as CompletionsAPI from 'openai/resources/completions'; +import * as ChatCompletionsAPI from 'openai/resources/chat/completions'; import { Stream } from 'openai/streaming'; export class Completions extends APIResource { @@ -251,6 +252,11 @@ export interface CompletionCreateParamsBase { */ stream?: boolean | null; + /** + * Options for streaming response. Only set this when you set `stream: true`. 
+ */ + stream_options?: ChatCompletionsAPI.ChatCompletionStreamOptions | null; + /** * The suffix that comes after a completion of inserted text. * diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index bd398b91d..21277e1d6 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -39,6 +39,7 @@ describe('resource completions', () => { seed: -9223372036854776000, stop: 'string', stream: false, + stream_options: { include_usage: true }, temperature: 1, tool_choice: 'none', tools: [ diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 2641bf7e3..3f6792447 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -35,6 +35,7 @@ describe('resource completions', () => { seed: -9223372036854776000, stop: '\n', stream: false, + stream_options: { include_usage: true }, suffix: 'test.', temperature: 1, top_p: 1, From 7196ac9310d58d057fb2a575e60c1718bf6341a2 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 6 May 2024 15:07:17 -0400 Subject: [PATCH 106/533] release: 4.42.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index aefe28544..bca107b6c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.41.1" + ".": "4.42.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 39187ece0..98885d747 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.42.0 (2024-05-06) + +Full Changelog: [v4.41.1...v4.42.0](https://github.com/openai/openai-node/compare/v4.41.1...v4.42.0) + +### Features + +* **api:** add usage metadata 
when streaming ([#829](https://github.com/openai/openai-node/issues/829)) ([6707f11](https://github.com/openai/openai-node/commit/6707f119a191ad98d634ad208be852f9f39c6c0e)) + + +### Bug Fixes + +* **example:** fix fine tuning example ([#827](https://github.com/openai/openai-node/issues/827)) ([6480a50](https://github.com/openai/openai-node/commit/6480a506c096a2664bd2ad296481e51017ff4185)) + ## 4.41.1 (2024-05-06) Full Changelog: [v4.41.0...v4.41.1](https://github.com/openai/openai-node/compare/v4.41.0...v4.41.1) diff --git a/README.md b/README.md index d911ccc63..e4c0ad332 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.41.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.42.0/mod.ts'; ``` diff --git a/package.json b/package.json index 86b5bfc11..97854fcd8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.41.1", + "version": "4.42.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 5216721fc..358ed3685 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.41.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.42.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index afa714f81..c1a790c33 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.41.1'; // x-release-please-version +export const VERSION = '4.42.0'; // x-release-please-version From b125bdddb4d91bdc1f7f23d4242e343f851efa01 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 8 May 2024 16:10:41 
-0400 Subject: [PATCH 107/533] feat(api): adding file purposes (#831) --- .stats.yml | 2 +- src/resources/files.ts | 18 +++++++++--------- tests/api-resources/files.test.ts | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.stats.yml b/.stats.yml index 49956282b..50c6b293d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-edb5af3ade0cd27cf366b0654b90c7a81c43c433e11fc3f6e621e2c779de10d4.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2e14236d4015bf3b956290ea8b656224a0c7b206a356c6af2a7ae43fdbceb04c.yml diff --git a/src/resources/files.ts b/src/resources/files.ts index 820c7a1fa..63dff5bd4 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -148,10 +148,11 @@ export interface FileObject { object: 'file'; /** - * The intended purpose of the file. Supported values are `fine-tune`, - * `fine-tune-results`, `assistants`, and `assistants_output`. + * The intended purpose of the file. Supported values are `assistants`, + * `assistants_output`, `batch`, `batch_output`, `fine-tune`, and + * `fine-tune-results`. */ - purpose: 'fine-tune' | 'fine-tune-results' | 'assistants' | 'assistants_output'; + purpose: 'assistants' | 'assistants_output' | 'batch' | 'batch_output' | 'fine-tune' | 'fine-tune-results'; /** * @deprecated: Deprecated. The current status of the file, which can be either @@ -175,14 +176,13 @@ export interface FileCreateParams { /** * The intended purpose of the uploaded file. * - * Use "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning) and - * "assistants" for + * Use "assistants" for * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Messages](https://platform.openai.com/docs/api-reference/messages). This allows - * us to validate the format of the uploaded file is correct for fine-tuning. 
+ * [Messages](https://platform.openai.com/docs/api-reference/messages), "batch" for + * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for + * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). */ - purpose: 'fine-tune' | 'assistants'; + purpose: 'assistants' | 'batch' | 'fine-tune'; } export interface FileListParams { diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index 514f42e3a..2fda1c947 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -12,7 +12,7 @@ describe('resource files', () => { test('create: only required params', async () => { const responsePromise = openai.files.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - purpose: 'fine-tune', + purpose: 'assistants', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -26,7 +26,7 @@ describe('resource files', () => { test('create: required and optional params', async () => { const response = await openai.files.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - purpose: 'fine-tune', + purpose: 'assistants', }); }); From 579edb5f89896b99be0b35e112455cef9b864bc0 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 8 May 2024 16:11:02 -0400 Subject: [PATCH 108/533] release: 4.43.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index bca107b6c..f533aa156 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.42.0" + ".": "4.43.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 98885d747..18e728d02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-1,5 +1,13 @@ # Changelog +## 4.43.0 (2024-05-08) + +Full Changelog: [v4.42.0...v4.43.0](https://github.com/openai/openai-node/compare/v4.42.0...v4.43.0) + +### Features + +* **api:** adding file purposes ([#831](https://github.com/openai/openai-node/issues/831)) ([a62b877](https://github.com/openai/openai-node/commit/a62b8779ff7261cdd6aa7bf72fb6407cc7e3fd21)) + ## 4.42.0 (2024-05-06) Full Changelog: [v4.41.1...v4.42.0](https://github.com/openai/openai-node/compare/v4.41.1...v4.42.0) diff --git a/README.md b/README.md index e4c0ad332..621b53a84 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.42.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.43.0/mod.ts'; ``` diff --git a/package.json b/package.json index 97854fcd8..434e0686b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.42.0", + "version": "4.43.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 358ed3685..7badf1191 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.42.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.43.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index c1a790c33..8be389808 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.42.0'; // x-release-please-version +export const VERSION = '4.43.0'; // x-release-please-version From f0a2d8d55e8b68dd9870cb29bb3cb7d5468b0fad Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 9 May 2024 16:19:10 -0400 Subject: [PATCH 
109/533] feat(api): add message image content (#834) --- .stats.yml | 2 +- api.md | 6 + src/lib/AssistantStream.ts | 11 +- src/resources/beta/threads/index.ts | 6 + src/resources/beta/threads/messages.ts | 110 +++++++++++++++++- src/resources/beta/threads/runs/runs.ts | 17 +-- src/resources/beta/threads/threads.ts | 22 ++-- src/resources/files.ts | 16 ++- .../beta/threads/messages.test.ts | 7 +- .../beta/threads/runs/runs.test.ts | 6 +- .../beta/threads/threads.test.ts | 12 +- 11 files changed, 171 insertions(+), 44 deletions(-) diff --git a/.stats.yml b/.stats.yml index 50c6b293d..52e87d1b5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2e14236d4015bf3b956290ea8b656224a0c7b206a356c6af2a7ae43fdbceb04c.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-084b8f68408c6b689a55200a78bcf233769bfcd8e999d9fadaeb399152b05bcd.yml diff --git a/api.md b/api.md index 18cdd9e62..17a3f9632 100644 --- a/api.md +++ b/api.md @@ -349,14 +349,20 @@ Types: - ImageFileContentBlock - ImageFileDelta - ImageFileDeltaBlock +- ImageURL +- ImageURLContentBlock +- ImageURLDelta +- ImageURLDeltaBlock - Message - MessageContent - MessageContentDelta +- MessageContentPartParam - MessageDeleted - MessageDelta - MessageDeltaEvent - Text - TextContentBlock +- TextContentBlockParam - TextDelta - TextDeltaBlock diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index a2974826c..de7511b5d 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -7,6 +7,7 @@ import { ImageFile, TextDelta, Messages, + MessageContent, } from 'openai/resources/beta/threads/messages'; import * as Core from 'openai/core'; import { RequestOptions } from 'openai/core'; @@ -87,7 +88,7 @@ export class AssistantStream #messageSnapshot: Message | undefined; #finalRun: Run | undefined; #currentContentIndex: number | undefined; - 
#currentContent: TextContentBlock | ImageFileContentBlock | undefined; + #currentContent: MessageContent | undefined; #currentToolCallIndex: number | undefined; #currentToolCall: ToolCall | undefined; @@ -624,10 +625,8 @@ export class AssistantStream currentContent, ); } else { - snapshot.content[contentElement.index] = contentElement as - | TextContentBlock - | ImageFileContentBlock; - //This is a new element + snapshot.content[contentElement.index] = contentElement as MessageContent; + // This is a new element newContent.push(contentElement); } } @@ -650,7 +649,7 @@ export class AssistantStream #accumulateContent( contentElement: MessageContentDelta, - currentContent: TextContentBlock | ImageFileContentBlock | undefined, + currentContent: MessageContent | undefined, ): TextContentBlock | ImageFileContentBlock { return AssistantStream.accumulateDelta(currentContent as unknown as Record, contentElement) as | TextContentBlock diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index d0ebb1798..b55f67edf 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -11,14 +11,20 @@ export { ImageFileContentBlock, ImageFileDelta, ImageFileDeltaBlock, + ImageURL, + ImageURLContentBlock, + ImageURLDelta, + ImageURLDeltaBlock, Message, MessageContent, MessageContentDelta, + MessageContentPartParam, MessageDeleted, MessageDelta, MessageDeltaEvent, Text, TextContentBlock, + TextContentBlockParam, TextDelta, TextDeltaBlock, MessageCreateParams, diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 8ce714f58..a1f90e1e4 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -249,9 +249,16 @@ export namespace FilePathDeltaAnnotation { export interface ImageFile { /** * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - * in the message content. + * in the message content. 
Set `purpose="vision"` when uploading the File if you + * need to later display the file content. */ file_id: string; + + /** + * Specifies the detail level of the image if specified by the user. `low` uses + * fewer tokens, you can opt in to high resolution using `high`. + */ + detail?: 'auto' | 'low' | 'high'; } /** @@ -268,9 +275,16 @@ export interface ImageFileContentBlock { } export interface ImageFileDelta { + /** + * Specifies the detail level of the image if specified by the user. `low` uses + * fewer tokens, you can opt in to high resolution using `high`. + */ + detail?: 'auto' | 'low' | 'high'; + /** * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image - * in the message content. + * in the message content. Set `purpose="vision"` when uploading the File if you + * need to later display the file content. */ file_id?: string; } @@ -293,6 +307,63 @@ export interface ImageFileDeltaBlock { image_file?: ImageFileDelta; } +export interface ImageURL { + /** + * The external URL of the image, must be a supported image types: jpeg, jpg, png, + * gif, webp. + */ + url: string; + + /** + * Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + * to high resolution using `high`. Default value is `auto` + */ + detail?: 'auto' | 'low' | 'high'; +} + +/** + * References an image URL in the content of a message. + */ +export interface ImageURLContentBlock { + image_url: ImageURL; + + /** + * The type of the content part. + */ + type: 'image_url'; +} + +export interface ImageURLDelta { + /** + * Specifies the detail level of the image. `low` uses fewer tokens, you can opt in + * to high resolution using `high`. + */ + detail?: 'auto' | 'low' | 'high'; + + /** + * The URL of the image, must be a supported image types: jpeg, jpg, png, gif, + * webp. + */ + url?: string; +} + +/** + * References an image URL in the content of a message. 
+ */ +export interface ImageURLDeltaBlock { + /** + * The index of the content part in the message. + */ + index: number; + + /** + * Always `image_url`. + */ + type: 'image_url'; + + image_url?: ImageURLDelta; +} + /** * Represents a message within a * [thread](https://platform.openai.com/docs/api-reference/threads). @@ -406,13 +477,19 @@ export namespace Message { * References an image [File](https://platform.openai.com/docs/api-reference/files) * in the content of a message. */ -export type MessageContent = ImageFileContentBlock | TextContentBlock; +export type MessageContent = ImageFileContentBlock | ImageURLContentBlock | TextContentBlock; + +/** + * References an image [File](https://platform.openai.com/docs/api-reference/files) + * in the content of a message. + */ +export type MessageContentDelta = ImageFileDeltaBlock | TextDeltaBlock | ImageURLDeltaBlock; /** * References an image [File](https://platform.openai.com/docs/api-reference/files) * in the content of a message. */ -export type MessageContentDelta = ImageFileDeltaBlock | TextDeltaBlock; +export type MessageContentPartParam = ImageFileContentBlock | ImageURLContentBlock | TextContentBlockParam; export interface MessageDeleted { id: string; @@ -479,6 +556,21 @@ export interface TextContentBlock { type: 'text'; } +/** + * The text content that is part of a message. + */ +export interface TextContentBlockParam { + /** + * Text content to be sent to the model + */ + text: string; + + /** + * Always `text`. + */ + type: 'text'; +} + export interface TextDelta { annotations?: Array; @@ -507,9 +599,9 @@ export interface TextDeltaBlock { export interface MessageCreateParams { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. 
Allowed values include: @@ -591,14 +683,20 @@ export namespace Messages { export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; export import ImageFileDelta = MessagesAPI.ImageFileDelta; export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; + export import ImageURL = MessagesAPI.ImageURL; + export import ImageURLContentBlock = MessagesAPI.ImageURLContentBlock; + export import ImageURLDelta = MessagesAPI.ImageURLDelta; + export import ImageURLDeltaBlock = MessagesAPI.ImageURLDeltaBlock; export import Message = MessagesAPI.Message; export import MessageContent = MessagesAPI.MessageContent; export import MessageContentDelta = MessagesAPI.MessageContentDelta; + export import MessageContentPartParam = MessagesAPI.MessageContentPartParam; export import MessageDeleted = MessagesAPI.MessageDeleted; export import MessageDelta = MessagesAPI.MessageDelta; export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; export import Text = MessagesAPI.Text; export import TextContentBlock = MessagesAPI.TextContentBlock; + export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; export import TextDelta = MessagesAPI.TextDelta; export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; export import MessagesPage = MessagesAPI.MessagesPage; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 18095886a..d188edb2d 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -9,6 +9,7 @@ import { sleep } from 'openai/core'; import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream'; import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; import * as AssistantsAPI from 'openai/resources/beta/assistants'; +import * as MessagesAPI from 'openai/resources/beta/threads/messages'; import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; 
import { CursorPage, type CursorPageParams } from 'openai/pagination'; @@ -747,9 +748,9 @@ export interface RunCreateParamsBase { export namespace RunCreateParams { export interface AdditionalMessage { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. Allowed values include: @@ -999,9 +1000,9 @@ export interface RunCreateAndPollParams { export namespace RunCreateAndPollParams { export interface AdditionalMessage { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. Allowed values include: @@ -1204,9 +1205,9 @@ export interface RunCreateAndStreamParams { export namespace RunCreateAndStreamParams { export interface AdditionalMessage { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. Allowed values include: @@ -1409,9 +1410,9 @@ export interface RunStreamParams { export namespace RunStreamParams { export interface AdditionalMessage { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. Allowed values include: diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index b8b3ff2be..7bd86fa50 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -284,9 +284,9 @@ export interface ThreadCreateParams { export namespace ThreadCreateParams { export interface Message { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. 
Allowed values include: @@ -623,9 +623,9 @@ export namespace ThreadCreateAndRunParams { export namespace Thread { export interface Message { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. Allowed values include: @@ -973,9 +973,9 @@ export namespace ThreadCreateAndRunPollParams { export namespace Thread { export interface Message { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. Allowed values include: @@ -1302,9 +1302,9 @@ export namespace ThreadCreateAndRunStreamParams { export namespace Thread { export interface Message { /** - * The content of the message. + * The text contents of the message. */ - content: string; + content: string | Array; /** * The role of the entity that is creating the message. Allowed values include: @@ -1503,14 +1503,20 @@ export namespace Threads { export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; export import ImageFileDelta = MessagesAPI.ImageFileDelta; export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; + export import ImageURL = MessagesAPI.ImageURL; + export import ImageURLContentBlock = MessagesAPI.ImageURLContentBlock; + export import ImageURLDelta = MessagesAPI.ImageURLDelta; + export import ImageURLDeltaBlock = MessagesAPI.ImageURLDeltaBlock; export import Message = MessagesAPI.Message; export import MessageContent = MessagesAPI.MessageContent; export import MessageContentDelta = MessagesAPI.MessageContentDelta; + export import MessageContentPartParam = MessagesAPI.MessageContentPartParam; export import MessageDeleted = MessagesAPI.MessageDeleted; export import MessageDelta = MessagesAPI.MessageDelta; export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; export import Text = MessagesAPI.Text; export import 
TextContentBlock = MessagesAPI.TextContentBlock; + export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; export import TextDelta = MessagesAPI.TextDelta; export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; export import MessagesPage = MessagesAPI.MessagesPage; diff --git a/src/resources/files.ts b/src/resources/files.ts index 63dff5bd4..5d284a071 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -149,10 +149,17 @@ export interface FileObject { /** * The intended purpose of the file. Supported values are `assistants`, - * `assistants_output`, `batch`, `batch_output`, `fine-tune`, and - * `fine-tune-results`. + * `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` + * and `vision`. */ - purpose: 'assistants' | 'assistants_output' | 'batch' | 'batch_output' | 'fine-tune' | 'fine-tune-results'; + purpose: + | 'assistants' + | 'assistants_output' + | 'batch' + | 'batch_output' + | 'fine-tune' + | 'fine-tune-results' + | 'vision'; /** * @deprecated: Deprecated. The current status of the file, which can be either @@ -178,7 +185,8 @@ export interface FileCreateParams { * * Use "assistants" for * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Messages](https://platform.openai.com/docs/api-reference/messages), "batch" for + * [Message](https://platform.openai.com/docs/api-reference/messages) files, + * "vision" for Assistants image file inputs, "batch" for * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). 
*/ diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index 262ff178d..01268586c 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -10,7 +10,10 @@ const openai = new OpenAI({ describe('resource messages', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.threads.messages.create('string', { content: 'x', role: 'user' }); + const responsePromise = openai.beta.threads.messages.create('string', { + content: 'string', + role: 'user', + }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -22,7 +25,7 @@ describe('resource messages', () => { test('create: required and optional params', async () => { const response = await openai.beta.threads.messages.create('string', { - content: 'x', + content: 'string', role: 'user', attachments: [ { diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 85d97c34c..3ee6ecb4e 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -27,7 +27,7 @@ describe('resource runs', () => { additional_messages: [ { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', @@ -58,7 +58,7 @@ describe('resource runs', () => { }, { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', @@ -89,7 +89,7 @@ describe('resource runs', () => { }, { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index f2521cd5b..4c4256258 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ 
-35,7 +35,7 @@ describe('resource threads', () => { messages: [ { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', @@ -66,7 +66,7 @@ describe('resource threads', () => { }, { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', @@ -97,7 +97,7 @@ describe('resource threads', () => { }, { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', @@ -214,7 +214,7 @@ describe('resource threads', () => { messages: [ { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', @@ -245,7 +245,7 @@ describe('resource threads', () => { }, { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', @@ -276,7 +276,7 @@ describe('resource threads', () => { }, { role: 'user', - content: 'x', + content: 'string', attachments: [ { file_id: 'string', From 2a12cfdef022cf8e74a4d08e9db6ad3ad42fea95 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 9 May 2024 16:19:29 -0400 Subject: [PATCH 110/533] release: 4.44.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f533aa156..7f5d28dff 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.43.0" + ".": "4.44.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 18e728d02..ecdbfdb14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.44.0 (2024-05-09) + +Full Changelog: [v4.43.0...v4.44.0](https://github.com/openai/openai-node/compare/v4.43.0...v4.44.0) + +### Features + +* **api:** add message image content ([#834](https://github.com/openai/openai-node/issues/834)) 
([7757b3e](https://github.com/openai/openai-node/commit/7757b3ea54a2c5cc251f55af0b676952ba12e8a6)) + ## 4.43.0 (2024-05-08) Full Changelog: [v4.42.0...v4.43.0](https://github.com/openai/openai-node/compare/v4.42.0...v4.43.0) diff --git a/README.md b/README.md index 621b53a84..397bb2185 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.43.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.44.0/mod.ts'; ``` diff --git a/package.json b/package.json index 434e0686b..e70c0fd09 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.43.0", + "version": "4.44.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 7badf1191..015d307ea 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.43.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.44.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 8be389808..4ebba76ed 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.43.0'; // x-release-please-version +export const VERSION = '4.44.0'; // x-release-please-version From babb1404751059bdd171b792d03fd21272dd8f8b Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 10 May 2024 13:26:55 -0400 Subject: [PATCH 111/533] chore(dependency): bumped Next.js version (#836) --- ecosystem-tests/vercel-edge/package-lock.json | 119 ++++++++---------- ecosystem-tests/vercel-edge/package.json | 2 +- examples/package.json | 2 +- 3 files changed, 53 insertions(+), 70 deletions(-) diff 
--git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index fdfe2952d..bc820a010 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "13.5.6", + "next": "14.1.1", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -1180,14 +1180,14 @@ } }, "node_modules/@next/env": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/env/-/env-13.5.6.tgz", - "integrity": "sha512-Yac/bV5sBGkkEXmAX5FWPS9Mmo2rthrOPRQQNfycJPkjUAUclomCPH7QFVCDQ4Mp2k2K1SSM6m0zrxYrOwtFQw==" + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.1.tgz", + "integrity": "sha512-7CnQyD5G8shHxQIIg3c7/pSeYFeMhsNbpU/bmvH7ZnDql7mNRgg8O2JZrhrc/soFnfBnKP4/xXNiiSIPn2w8gA==" }, "node_modules/@next/swc-darwin-arm64": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.5.6.tgz", - "integrity": "sha512-5nvXMzKtZfvcu4BhtV0KH1oGv4XEW+B+jOfmBdpFI3C7FrB/MfujRpWYSBBO64+qbW8pkZiSyQv9eiwnn5VIQA==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.1.tgz", + "integrity": "sha512-yDjSFKQKTIjyT7cFv+DqQfW5jsD+tVxXTckSe1KIouKk75t1qZmj/mV3wzdmFb0XHVGtyRjDMulfVG8uCKemOQ==", "cpu": [ "arm64" ], @@ -1200,9 +1200,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.5.6.tgz", - "integrity": "sha512-6cgBfxg98oOCSr4BckWjLLgiVwlL3vlLj8hXg2b+nDgm4bC/qVXXLfpLB9FHdoDu4057hzywbxKvmYGmi7yUzA==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.1.tgz", + "integrity": "sha512-KCQmBL0CmFmN8D64FHIZVD9I4ugQsDBBEJKiblXGgwn7wBCSe8N4Dx47sdzl4JAg39IkSN5NNrr8AniXLMb3aw==", "cpu": [ "x64" ], @@ 
-1215,9 +1215,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.5.6.tgz", - "integrity": "sha512-txagBbj1e1w47YQjcKgSU4rRVQ7uF29YpnlHV5xuVUsgCUf2FmyfJ3CPjZUvpIeXCJAoMCFAoGnbtX86BK7+sg==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.1.tgz", + "integrity": "sha512-YDQfbWyW0JMKhJf/T4eyFr4b3tceTorQ5w2n7I0mNVTFOvu6CGEzfwT3RSAQGTi/FFMTFcuspPec/7dFHuP7Eg==", "cpu": [ "arm64" ], @@ -1230,9 +1230,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.5.6.tgz", - "integrity": "sha512-cGd+H8amifT86ZldVJtAKDxUqeFyLWW+v2NlBULnLAdWsiuuN8TuhVBt8ZNpCqcAuoruoSWynvMWixTFcroq+Q==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.1.tgz", + "integrity": "sha512-fiuN/OG6sNGRN/bRFxRvV5LyzLB8gaL8cbDH5o3mEiVwfcMzyE5T//ilMmaTrnA8HLMS6hoz4cHOu6Qcp9vxgQ==", "cpu": [ "arm64" ], @@ -1245,9 +1245,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.5.6.tgz", - "integrity": "sha512-Mc2b4xiIWKXIhBy2NBTwOxGD3nHLmq4keFk+d4/WL5fMsB8XdJRdtUlL87SqVCTSaf1BRuQQf1HvXZcy+rq3Nw==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.1.tgz", + "integrity": "sha512-rv6AAdEXoezjbdfp3ouMuVqeLjE1Bin0AuE6qxE6V9g3Giz5/R3xpocHoAi7CufRR+lnkuUjRBn05SYJ83oKNQ==", "cpu": [ "x64" ], @@ -1260,9 +1260,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.5.6.tgz", - "integrity": 
"sha512-CFHvP9Qz98NruJiUnCe61O6GveKKHpJLloXbDSWRhqhkJdZD2zU5hG+gtVJR//tyW897izuHpM6Gtf6+sNgJPQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.1.tgz", + "integrity": "sha512-YAZLGsaNeChSrpz/G7MxO3TIBLaMN8QWMr3X8bt6rCvKovwU7GqQlDu99WdvF33kI8ZahvcdbFsy4jAFzFX7og==", "cpu": [ "x64" ], @@ -1275,9 +1275,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.5.6.tgz", - "integrity": "sha512-aFv1ejfkbS7PUa1qVPwzDHjQWQtknzAZWGTKYIAaS4NMtBlk3VyA6AYn593pqNanlicewqyl2jUhQAaFV/qXsg==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.1.tgz", + "integrity": "sha512-1L4mUYPBMvVDMZg1inUYyPvFSduot0g73hgfD9CODgbr4xiTYe0VOMTZzaRqYJYBA9mana0x4eaAaypmWo1r5A==", "cpu": [ "arm64" ], @@ -1290,9 +1290,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.5.6.tgz", - "integrity": "sha512-XqqpHgEIlBHvzwG8sp/JXMFkLAfGLqkbVsyN+/Ih1mR8INb6YCc2x/Mbwi6hsAgUnqQztz8cvEbHJUbSl7RHDg==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.1.tgz", + "integrity": "sha512-jvIE9tsuj9vpbbXlR5YxrghRfMuG0Qm/nZ/1KDHc+y6FpnZ/apsgh+G6t15vefU0zp3WSpTMIdXRUsNl/7RSuw==", "cpu": [ "ia32" ], @@ -1305,9 +1305,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.5.6.tgz", - "integrity": "sha512-Cqfe1YmOS7k+5mGu92nl5ULkzpKuxJrP3+4AEuPmrpFZ3BHxTY3TnHmU1On3bFmFFs6FbTcdF58CCUProGpIGQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.1.tgz", + 
"integrity": "sha512-S6K6EHDU5+1KrBDLko7/c1MNy/Ya73pIAmvKeFwsF4RmBFJSO7/7YeD4FnZ4iBdzE69PpQ4sOMU9ORKeNuxe8A==", "cpu": [ "x64" ], @@ -2566,9 +2566,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001524", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001524.tgz", - "integrity": "sha512-Jj917pJtYg9HSJBF95HVX3Cdr89JUyLT4IZ8SvM5aDRni95swKgYi3TgYLH5hnGfPE/U1dg6IfZ50UsIlLkwSA==", + "version": "1.0.30001617", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001617.tgz", + "integrity": "sha512-mLyjzNI9I+Pix8zwcrpxEbGlfqOkF9kM3ptzmKNw5tizSyYwMe+nGLTqMK9cO+0E+Bh6TsBxNAaHWEM8xwSsmA==", "funding": [ { "type": "opencollective", @@ -3780,11 +3780,6 @@ "node": ">= 6" } }, - "node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" - }, "node_modules/globals": { "version": "11.12.0", "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", @@ -5066,34 +5061,34 @@ "dev": true }, "node_modules/next": { - "version": "13.5.6", - "resolved": "https://registry.npmjs.org/next/-/next-13.5.6.tgz", - "integrity": "sha512-Y2wTcTbO4WwEsVb4A8VSnOsG1I9ok+h74q0ZdxkwM3EODqrs4pasq7O0iUxbcS9VtWMicG7f3+HAj0r1+NtKSw==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/next/-/next-14.1.1.tgz", + "integrity": "sha512-McrGJqlGSHeaz2yTRPkEucxQKe5Zq7uPwyeHNmJaZNY4wx9E9QdxmTp310agFRoMuIYgQrCrT3petg13fSVOww==", "dependencies": { - "@next/env": "13.5.6", + "@next/env": "14.1.1", "@swc/helpers": "0.5.2", "busboy": "1.6.0", - "caniuse-lite": "^1.0.30001406", + "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", "postcss": "8.4.31", - "styled-jsx": "5.1.1", - "watchpack": "2.4.0" + "styled-jsx": "5.1.1" }, "bin": { "next": "dist/bin/next" }, "engines": { - 
"node": ">=16.14.0" + "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "13.5.6", - "@next/swc-darwin-x64": "13.5.6", - "@next/swc-linux-arm64-gnu": "13.5.6", - "@next/swc-linux-arm64-musl": "13.5.6", - "@next/swc-linux-x64-gnu": "13.5.6", - "@next/swc-linux-x64-musl": "13.5.6", - "@next/swc-win32-arm64-msvc": "13.5.6", - "@next/swc-win32-ia32-msvc": "13.5.6", - "@next/swc-win32-x64-msvc": "13.5.6" + "@next/swc-darwin-arm64": "14.1.1", + "@next/swc-darwin-x64": "14.1.1", + "@next/swc-linux-arm64-gnu": "14.1.1", + "@next/swc-linux-arm64-musl": "14.1.1", + "@next/swc-linux-x64-gnu": "14.1.1", + "@next/swc-linux-x64-musl": "14.1.1", + "@next/swc-win32-arm64-msvc": "14.1.1", + "@next/swc-win32-ia32-msvc": "14.1.1", + "@next/swc-win32-x64-msvc": "14.1.1" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", @@ -6554,18 +6549,6 @@ "makeerror": "1.0.12" } }, - "node_modules/watchpack": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", - "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", - "dependencies": { - "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.1.2" - }, - "engines": { - "node": ">=10.13.0" - } - }, "node_modules/web-vitals": { "version": "0.2.4", "resolved": "https://registry.npmjs.org/web-vitals/-/web-vitals-0.2.4.tgz", diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 48223796c..4c75dd4fd 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "13.5.6", + "next": "14.1.1", "react": "18.2.0", "react-dom": "18.2.0" }, diff --git a/examples/package.json b/examples/package.json index 3b27b221f..04ed507b9 100644 --- a/examples/package.json +++ b/examples/package.json @@ -7,7 +7,7 @@ "private": true, "dependencies": { "express": "^4.18.2", - 
"next": "^13.5.5", + "next": "^14.1.1", "openai": "file:..", "zod-to-json-schema": "^3.21.4" }, From 6e556d9e12341155cc13fe226ab110d63858370e Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 10 May 2024 15:30:39 -0400 Subject: [PATCH 112/533] chore(docs): add SECURITY.md (#838) --- SECURITY.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..c54acaf33 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,29 @@ +# Security Policy + +## Reporting Security Issues + +This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. + +To report a security issue, please contact the Stainless team at security@stainlessapi.com. + +## Responsible Disclosure + +We appreciate the efforts of security researchers and individuals who help us maintain the security of +SDKs we generate. If you believe you have found a security vulnerability, please adhere to responsible +disclosure practices by allowing us a reasonable amount of time to investigate and address the issue +before making any information public. + +## Reporting Non-SDK Related Security Issues + +If you encounter security issues that are not directly related to SDKs but pertain to the services +or products provided by OpenAI please follow the respective company's security reporting guidelines. + +### OpenAI Terms and Policies + +Our Security Policy can be found at [Security Policy URL](https://openai.com/policies/coordinated-vulnerability-disclosure-policy). + +Please contact disclosure@openai.com for any questions or concerns regarding security of our services. + +--- + +Thank you for helping us keep the SDKs and systems they interact with secure. 
From e279f8c51aa80cb913ccb6df647407bea1f2f071 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 10 May 2024 16:42:25 -0400 Subject: [PATCH 113/533] feat(azure): batch api (#839) --- src/index.ts | 11 +- tests/lib/azure.test.ts | 274 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 279 insertions(+), 6 deletions(-) diff --git a/src/index.ts b/src/index.ts index b146a7bab..f5c8a0fe1 100644 --- a/src/index.ts +++ b/src/index.ts @@ -346,6 +346,7 @@ export interface AzureClientOptions extends ClientOptions { /** API Client for interfacing with the Azure OpenAI API. */ export class AzureOpenAI extends OpenAI { private _azureADTokenProvider: (() => Promise) | undefined; + private _deployment: string | undefined; apiVersion: string = ''; /** * API Client for interfacing with the Azure OpenAI API. @@ -412,11 +413,7 @@ export class AzureOpenAI extends OpenAI { ); } - if (deployment) { - baseURL = `${endpoint}/openai/deployments/${deployment}`; - } else { - baseURL = `${endpoint}/openai`; - } + baseURL = `${endpoint}/openai`; } else { if (endpoint) { throw new Errors.OpenAIError('baseURL and endpoint are mutually exclusive'); @@ -432,6 +429,7 @@ export class AzureOpenAI extends OpenAI { this._azureADTokenProvider = azureADTokenProvider; this.apiVersion = apiVersion; + this._deployment = deployment; } override buildRequest(options: Core.FinalRequestOptions): { @@ -443,7 +441,7 @@ export class AzureOpenAI extends OpenAI { if (!Core.isObj(options.body)) { throw new Error('Expected request body to be an object'); } - const model = options.body['model']; + const model = this._deployment || options.body['model']; delete options.body['model']; if (model !== undefined && !this.baseURL.includes('/deployments')) { options.path = `/deployments/${model}${options.path}`; @@ -494,6 +492,7 @@ const _deployments_endpoints = new Set([ '/audio/translations', '/audio/speech', '/images/generations', + '/batches', ]); const 
API_KEY_SENTINEL = ''; diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index 4895273be..32b59ae33 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -4,6 +4,8 @@ import { Headers } from 'openai/core'; import defaultFetch, { Response, type RequestInit, type RequestInfo } from 'node-fetch'; const apiVersion = '2024-02-15-preview'; +const deployment = 'deployment'; +const model = 'unused model'; describe('instantiate azure client', () => { const env = process.env; @@ -275,6 +277,278 @@ describe('instantiate azure client', () => { describe('azure request building', () => { const client = new AzureOpenAI({ baseURL: 'https://example.com/', apiKey: 'My API Key', apiVersion }); + describe('model to deployment mapping', function () { + const testFetch = async (url: RequestInfo): Promise => { + return new Response(JSON.stringify({ url }), { headers: { 'content-type': 'application/json' } }); + }; + describe('with client-level deployment', function () { + const client = new AzureOpenAI({ + endpoint: 'https://example.com/', + apiKey: 'My API Key', + apiVersion, + deployment, + fetch: testFetch, + }); + + test('handles Batch', async () => { + expect( + await client.batches.create({ + completion_window: '24h', + endpoint: '/v1/chat/completions', + input_file_id: 'file-id', + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/batches?api-version=${apiVersion}`, + }); + }); + + test('handles completions', async () => { + expect( + await client.completions.create({ + model, + prompt: 'prompt', + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/completions?api-version=${apiVersion}`, + }); + }); + + test('handles chat completions', async () => { + expect( + await client.chat.completions.create({ + model, + messages: [{ role: 'system', content: 'Hello' }], + }), + ).toStrictEqual({ + url: 
`https://example.com/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`, + }); + }); + + test('handles embeddings', async () => { + expect( + await client.embeddings.create({ + model, + input: 'input', + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/embeddings?api-version=${apiVersion}`, + }); + }); + + test('handles audio translations', async () => { + expect( + await client.audio.translations.create({ + model, + file: { url: 'https://example.com/', blob: () => 0 as any }, + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/audio/translations?api-version=${apiVersion}`, + }); + }); + + test('handles audio transcriptions', async () => { + expect( + await client.audio.transcriptions.create({ + model, + file: { url: 'https://example.com/', blob: () => 0 as any }, + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/audio/transcriptions?api-version=${apiVersion}`, + }); + }); + + test('handles text to speech', async () => { + expect( + await ( + await client.audio.speech.create({ + model, + input: '', + voice: 'alloy', + }) + ).json(), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/audio/speech?api-version=${apiVersion}`, + }); + }); + + test('handles image generation', async () => { + expect( + await client.images.generate({ + model, + prompt: 'prompt', + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/images/generations?api-version=${apiVersion}`, + }); + }); + + test('handles assistants', async () => { + expect( + await client.beta.assistants.create({ + model, + }), + ).toStrictEqual({ + url: `https://example.com/openai/assistants?api-version=${apiVersion}`, + }); + }); + + test('handles files', async () => { + expect( + await client.files.create({ + file: { url: 'https://example.com/', blob: () => 0 as any }, + purpose: 'assistants', + 
}), + ).toStrictEqual({ + url: `https://example.com/openai/files?api-version=${apiVersion}`, + }); + }); + + test('handles fine tuning', async () => { + expect( + await client.fineTuning.jobs.create({ + model, + training_file: '', + }), + ).toStrictEqual({ + url: `https://example.com/openai/fine_tuning/jobs?api-version=${apiVersion}`, + }); + }); + }); + + describe('with no client-level deployment', function () { + const client = new AzureOpenAI({ + endpoint: 'https://example.com/', + apiKey: 'My API Key', + apiVersion, + fetch: testFetch, + }); + + test('Batch is not handled', async () => { + expect( + await client.batches.create({ + completion_window: '24h', + endpoint: '/v1/chat/completions', + input_file_id: 'file-id', + }), + ).toStrictEqual({ + url: `https://example.com/openai/batches?api-version=${apiVersion}`, + }); + }); + + test('handles completions', async () => { + expect( + await client.completions.create({ + model: deployment, + prompt: 'prompt', + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/completions?api-version=${apiVersion}`, + }); + }); + + test('handles chat completions', async () => { + expect( + await client.chat.completions.create({ + model: deployment, + messages: [{ role: 'system', content: 'Hello' }], + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`, + }); + }); + + test('handles embeddings', async () => { + expect( + await client.embeddings.create({ + model: deployment, + input: 'input', + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/embeddings?api-version=${apiVersion}`, + }); + }); + + test('Audio translations is not handled', async () => { + expect( + await client.audio.translations.create({ + model: deployment, + file: { url: 'https://example.com/', blob: () => 0 as any }, + }), + ).toStrictEqual({ + url: 
`https://example.com/openai/audio/translations?api-version=${apiVersion}`, + }); + }); + + test('Audio transcriptions is not handled', async () => { + expect( + await client.audio.transcriptions.create({ + model: deployment, + file: { url: 'https://example.com/', blob: () => 0 as any }, + }), + ).toStrictEqual({ + url: `https://example.com/openai/audio/transcriptions?api-version=${apiVersion}`, + }); + }); + + test('handles text to speech', async () => { + expect( + await ( + await client.audio.speech.create({ + model: deployment, + input: '', + voice: 'alloy', + }) + ).json(), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/audio/speech?api-version=${apiVersion}`, + }); + }); + + test('handles image generation', async () => { + expect( + await client.images.generate({ + model: deployment, + prompt: 'prompt', + }), + ).toStrictEqual({ + url: `https://example.com/openai/deployments/${deployment}/images/generations?api-version=${apiVersion}`, + }); + }); + + test('handles assistants', async () => { + expect( + await client.beta.assistants.create({ + model, + }), + ).toStrictEqual({ + url: `https://example.com/openai/assistants?api-version=${apiVersion}`, + }); + }); + + test('handles files', async () => { + expect( + await client.files.create({ + file: { url: 'https://example.com/', blob: () => 0 as any }, + purpose: 'assistants', + }), + ).toStrictEqual({ + url: `https://example.com/openai/files?api-version=${apiVersion}`, + }); + }); + + test('handles fine tuning', async () => { + expect( + await client.fineTuning.jobs.create({ + model, + training_file: '', + }), + ).toStrictEqual({ + url: `https://example.com/openai/fine_tuning/jobs?api-version=${apiVersion}`, + }); + }); + }); + }); + describe('Content-Length', () => { test('handles multi-byte characters', () => { const { req } = client.buildRequest({ path: '/foo', method: 'post', body: { value: '—' } }); From 1a4d37a974ffa8a1596ad29bbde47aab99346778 Mon Sep 17 
00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Sat, 11 May 2024 01:06:02 -0400 Subject: [PATCH 114/533] release: 4.45.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7f5d28dff..fb1cbfe23 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.44.0" + ".": "4.45.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index ecdbfdb14..149d41da9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.45.0 (2024-05-11) + +Full Changelog: [v4.44.0...v4.45.0](https://github.com/openai/openai-node/compare/v4.44.0...v4.45.0) + +### Features + +* **azure:** batch api ([#839](https://github.com/openai/openai-node/issues/839)) ([e279f8c](https://github.com/openai/openai-node/commit/e279f8c51aa80cb913ccb6df647407bea1f2f071)) + + +### Chores + +* **dependency:** bumped Next.js version ([#836](https://github.com/openai/openai-node/issues/836)) ([babb140](https://github.com/openai/openai-node/commit/babb1404751059bdd171b792d03fd21272dd8f8b)) +* **docs:** add SECURITY.md ([#838](https://github.com/openai/openai-node/issues/838)) ([6e556d9](https://github.com/openai/openai-node/commit/6e556d9e12341155cc13fe226ab110d63858370e)) + ## 4.44.0 (2024-05-09) Full Changelog: [v4.43.0...v4.44.0](https://github.com/openai/openai-node/compare/v4.43.0...v4.44.0) diff --git a/README.md b/README.md index 397bb2185..e81093bbe 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.44.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.45.0/mod.ts'; ``` diff --git a/package.json b/package.json index e70c0fd09..c51375344 100644 --- 
a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.44.0", + "version": "4.45.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 015d307ea..0461f0f3f 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.44.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.45.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 4ebba76ed..2ebf697cb 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.44.0'; // x-release-please-version +export const VERSION = '4.45.0'; // x-release-please-version From 5b0a67cd666733ca4ee3454f2a53beda03198954 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 13 May 2024 14:14:57 -0400 Subject: [PATCH 115/533] feat(api): add gpt-4o model (#841) --- .stats.yml | 2 +- src/resources/beta/assistants.ts | 2 ++ src/resources/beta/threads/runs/runs.ts | 8 ++++++++ src/resources/beta/threads/threads.ts | 6 ++++++ src/resources/chat/chat.ts | 2 ++ 5 files changed, 19 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 52e87d1b5..f44b9b46a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-084b8f68408c6b689a55200a78bcf233769bfcd8e999d9fadaeb399152b05bcd.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-47007cc1aa5bc7b74107a99b377925978a0bd376ed67bdae724e80d5d0b63d57.yml diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index a24cee045..4f3136446 100644 --- 
a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1000,6 +1000,8 @@ export interface AssistantCreateParams { */ model: | (string & {}) + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index d188edb2d..267c0944d 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -660,6 +660,8 @@ export interface RunCreateParamsBase { */ model?: | (string & {}) + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' @@ -919,6 +921,8 @@ export interface RunCreateAndPollParams { */ model?: | (string & {}) + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' @@ -1124,6 +1128,8 @@ export interface RunCreateAndStreamParams { */ model?: | (string & {}) + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' @@ -1329,6 +1335,8 @@ export interface RunStreamParams { */ model?: | (string & {}) + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 7bd86fa50..e1fb3a2d4 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -492,6 +492,8 @@ export interface ThreadCreateAndRunParamsBase { */ model?: | (string & {}) + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' @@ -849,6 +851,8 @@ export interface ThreadCreateAndRunPollParams { */ model?: | (string & {}) + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' @@ -1178,6 +1182,8 @@ export interface ThreadCreateAndRunStreamParams { */ model?: | (string & {}) + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 
'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index ff271e5b4..925401fe1 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -9,6 +9,8 @@ export class Chat extends APIResource { } export type ChatModel = + | 'gpt-4o' + | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' From 65bcdfe118356e12f567675aff4c99d8ff959b11 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 13 May 2024 14:15:18 -0400 Subject: [PATCH 116/533] release: 4.46.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index fb1cbfe23..492591f96 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.45.0" + ".": "4.46.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 149d41da9..1e35a5974 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.46.0 (2024-05-13) + +Full Changelog: [v4.45.0...v4.46.0](https://github.com/openai/openai-node/compare/v4.45.0...v4.46.0) + +### Features + +* **api:** add gpt-4o model ([#841](https://github.com/openai/openai-node/issues/841)) ([c818ed1](https://github.com/openai/openai-node/commit/c818ed139bfba81af6ca3c4eda08d52366758529)) + ## 4.45.0 (2024-05-11) Full Changelog: [v4.44.0...v4.45.0](https://github.com/openai/openai-node/compare/v4.44.0...v4.45.0) diff --git a/README.md b/README.md index e81093bbe..dd5f4c278 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.45.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.46.0/mod.ts'; ``` diff --git 
a/package.json b/package.json index c51375344..b2fa486f0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.45.0", + "version": "4.46.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 0461f0f3f..7203ae1fe 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.45.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.46.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 2ebf697cb..ac125a286 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.45.0'; // x-release-please-version +export const VERSION = '4.46.0'; // x-release-please-version From c17fcb789f8b40dd8360c2680a34e96dbde8a97f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 13 May 2024 15:01:38 -0400 Subject: [PATCH 117/533] refactor: change import paths to be relative (#843) --- src/index.ts | 4 +-- src/resources/audio/audio.ts | 8 +++--- src/resources/audio/speech.ts | 8 +++--- src/resources/audio/transcriptions.ts | 8 +++--- src/resources/audio/translations.ts | 8 +++--- src/resources/batches.ts | 10 +++---- src/resources/beta/assistants.ts | 20 ++++++------- src/resources/beta/beta.ts | 10 +++---- src/resources/beta/chat/chat.ts | 4 +-- src/resources/beta/chat/completions.ts | 28 +++++++++---------- src/resources/beta/threads/messages.ts | 12 ++++---- src/resources/beta/threads/runs/runs.ts | 28 +++++++++---------- src/resources/beta/threads/runs/steps.ts | 10 +++---- src/resources/beta/threads/threads.ts | 20 ++++++------- .../beta/vector-stores/file-batches.ts | 20 ++++++------- 
src/resources/beta/vector-stores/files.ts | 12 ++++---- .../beta/vector-stores/vector-stores.ts | 14 +++++----- src/resources/chat/chat.ts | 6 ++-- src/resources/chat/completions.ts | 16 +++++------ src/resources/completions.ts | 12 ++++---- src/resources/embeddings.ts | 6 ++-- src/resources/files.ts | 18 ++++++------ src/resources/fine-tuning/fine-tuning.ts | 4 +-- src/resources/fine-tuning/jobs/checkpoints.ts | 10 +++---- src/resources/fine-tuning/jobs/jobs.ts | 12 ++++---- src/resources/images.ts | 8 +++--- src/resources/models.ts | 8 +++--- src/resources/moderations.ts | 6 ++-- 28 files changed, 165 insertions(+), 165 deletions(-) diff --git a/src/index.ts b/src/index.ts index f5c8a0fe1..854536161 100644 --- a/src/index.ts +++ b/src/index.ts @@ -4,8 +4,8 @@ import * as Core from './core'; import * as Errors from './error'; import { type Agent, type RequestInit } from './_shims/index'; import * as Uploads from './uploads'; -import * as Pagination from 'openai/pagination'; -import * as API from 'openai/resources/index'; +import * as Pagination from './pagination'; +import * as API from './resources/index'; export interface ClientOptions { /** diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index f3fcba4c3..a89bf0102 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from 'openai/resource'; -import * as SpeechAPI from 'openai/resources/audio/speech'; -import * as TranscriptionsAPI from 'openai/resources/audio/transcriptions'; -import * as TranslationsAPI from 'openai/resources/audio/translations'; +import { APIResource } from '../../resource'; +import * as SpeechAPI from './speech'; +import * as TranscriptionsAPI from './transcriptions'; +import * as TranslationsAPI from './translations'; export class Audio extends APIResource { transcriptions: TranscriptionsAPI.Transcriptions = new TranscriptionsAPI.Transcriptions(this._client); diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 4b83bae3e..bcfbc80cc 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { type Response } from 'openai/_shims/index'; -import * as SpeechAPI from 'openai/resources/audio/speech'; +import * as Core from '../../core'; +import { APIResource } from '../../resource'; +import { type Response } from '../../_shims/index'; +import * as SpeechAPI from './speech'; export class Speech extends APIResource { /** diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index f01e8556d..bbffce4ed 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import * as TranscriptionsAPI from 'openai/resources/audio/transcriptions'; -import { type Uploadable, multipartFormRequestOptions } from 'openai/core'; +import * as Core from '../../core'; +import { APIResource } from '../../resource'; +import * as TranscriptionsAPI from './transcriptions'; +import { type Uploadable, multipartFormRequestOptions } from '../../core'; export class Transcriptions extends APIResource { /** diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 234933236..890c59d55 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import * as TranslationsAPI from 'openai/resources/audio/translations'; -import { type Uploadable, multipartFormRequestOptions } from 'openai/core'; +import * as Core from '../../core'; +import { APIResource } from '../../resource'; +import * as TranslationsAPI from './translations'; +import { type Uploadable, multipartFormRequestOptions } from '../../core'; export class Translations extends APIResource { /** diff --git a/src/resources/batches.ts b/src/resources/batches.ts index 2f6af03e6..ce04dd57b 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as BatchesAPI from 'openai/resources/batches'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import * as Core from '../core'; +import { APIResource } from '../resource'; +import { isRequestOptions } from '../core'; +import * as BatchesAPI from './batches'; +import { CursorPage, type CursorPageParams } from '../pagination'; export class Batches extends APIResource { /** diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 4f3136446..719054365 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1,15 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as AssistantsAPI from 'openai/resources/beta/assistants'; -import * as Shared from 'openai/resources/shared'; -import * as MessagesAPI from 'openai/resources/beta/threads/messages'; -import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; -import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; -import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import * as Core from '../../core'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as AssistantsAPI from './assistants'; +import * as Shared from '../shared'; +import * as MessagesAPI from './threads/messages'; +import * as ThreadsAPI from './threads/threads'; +import * as RunsAPI from './threads/runs/runs'; +import * as StepsAPI from './threads/runs/steps'; +import { CursorPage, type CursorPageParams } from '../../pagination'; export class Assistants extends APIResource { /** diff 
--git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index ff79d5242..cefe66824 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from 'openai/resource'; -import * as AssistantsAPI from 'openai/resources/beta/assistants'; -import * as ChatAPI from 'openai/resources/beta/chat/chat'; -import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; -import * as VectorStoresAPI from 'openai/resources/beta/vector-stores/vector-stores'; +import { APIResource } from '../../resource'; +import * as AssistantsAPI from './assistants'; +import * as ChatAPI from './chat/chat'; +import * as ThreadsAPI from './threads/threads'; +import * as VectorStoresAPI from './vector-stores/vector-stores'; export class Beta extends APIResource { vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); diff --git a/src/resources/beta/chat/chat.ts b/src/resources/beta/chat/chat.ts index 2b4a7a404..110ae46cb 100644 --- a/src/resources/beta/chat/chat.ts +++ b/src/resources/beta/chat/chat.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from 'openai/resource'; -import * as CompletionsAPI from 'openai/resources/beta/chat/completions'; +import { APIResource } from '../../../resource'; +import * as CompletionsAPI from './completions'; export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index 95fd0ac79..e002b6344 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -1,18 +1,18 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from 'openai/lib/ChatCompletionRunner'; -export { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from 'openai/lib/ChatCompletionRunner'; +import * as Core from '../../../core'; +import { APIResource } from '../../../resource'; +import { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner'; +export { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner'; import { ChatCompletionStreamingRunner, ChatCompletionStreamingFunctionRunnerParams, -} from 'openai/lib/ChatCompletionStreamingRunner'; +} from '../../../lib/ChatCompletionStreamingRunner'; export { ChatCompletionStreamingRunner, ChatCompletionStreamingFunctionRunnerParams, -} from 'openai/lib/ChatCompletionStreamingRunner'; -import { BaseFunctionsArgs } from 'openai/lib/RunnableFunction'; +} from '../../../lib/ChatCompletionStreamingRunner'; +import { BaseFunctionsArgs } from '../../../lib/RunnableFunction'; export { RunnableFunction, RunnableFunctions, @@ -20,13 +20,13 @@ export { RunnableFunctionWithoutParse, ParsingFunction, ParsingToolFunction, -} from 'openai/lib/RunnableFunction'; -import { ChatCompletionToolRunnerParams } from 'openai/lib/ChatCompletionRunner'; -export { ChatCompletionToolRunnerParams } from 'openai/lib/ChatCompletionRunner'; -import { ChatCompletionStreamingToolRunnerParams } from 'openai/lib/ChatCompletionStreamingRunner'; -export { ChatCompletionStreamingToolRunnerParams } from 'openai/lib/ChatCompletionStreamingRunner'; -import { ChatCompletionStream, type ChatCompletionStreamParams } from 'openai/lib/ChatCompletionStream'; -export { ChatCompletionStream, type ChatCompletionStreamParams } from 'openai/lib/ChatCompletionStream'; +} from '../../../lib/RunnableFunction'; +import { ChatCompletionToolRunnerParams } from 
'../../../lib/ChatCompletionRunner'; +export { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; +import { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; +export { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; +import { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; +export { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; export class Completions extends APIResource { /** diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index a1f90e1e4..a5307edbe 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -1,11 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as MessagesAPI from 'openai/resources/beta/threads/messages'; -import * as AssistantsAPI from 'openai/resources/beta/assistants'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import * as Core from '../../../core'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as MessagesAPI from './messages'; +import * as AssistantsAPI from '../assistants'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Messages extends APIResource { /** diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 267c0944d..715750604 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -1,19 +1,19 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIPromise } from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import { AssistantStream, RunCreateParamsBaseStream } from 'openai/lib/AssistantStream'; -import { sleep } from 'openai/core'; -import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream'; -import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; -import * as AssistantsAPI from 'openai/resources/beta/assistants'; -import * as MessagesAPI from 'openai/resources/beta/threads/messages'; -import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; -import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; -import { Stream } from 'openai/streaming'; +import * as Core from '../../../../core'; +import { APIPromise } from '../../../../core'; +import { APIResource } from '../../../../resource'; +import { isRequestOptions } from '../../../../core'; +import { AssistantStream, RunCreateParamsBaseStream } from '../../../../lib/AssistantStream'; +import { sleep } from '../../../../core'; +import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStream'; +import * as RunsAPI from './runs'; +import * as AssistantsAPI from '../../assistants'; +import * as MessagesAPI from '../messages'; +import * as ThreadsAPI from '../threads'; +import * as StepsAPI from './steps'; +import { CursorPage, type CursorPageParams } from '../../../../pagination'; +import { Stream } from '../../../../streaming'; export class Runs extends APIResource { steps: StepsAPI.Steps = new StepsAPI.Steps(this._client); diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index 203741f4b..0cbb60ca4 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by 
Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as StepsAPI from 'openai/resources/beta/threads/runs/steps'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import * as Core from '../../../../core'; +import { APIResource } from '../../../../resource'; +import { isRequestOptions } from '../../../../core'; +import * as StepsAPI from './steps'; +import { CursorPage, type CursorPageParams } from '../../../../pagination'; export class Steps extends APIResource { /** diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index e1fb3a2d4..662dcd09f 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -1,15 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIPromise } from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from 'openai/lib/AssistantStream'; -import * as ThreadsAPI from 'openai/resources/beta/threads/threads'; -import * as AssistantsAPI from 'openai/resources/beta/assistants'; -import * as MessagesAPI from 'openai/resources/beta/threads/messages'; -import * as RunsAPI from 'openai/resources/beta/threads/runs/runs'; -import { Stream } from 'openai/streaming'; +import * as Core from '../../../core'; +import { APIPromise } from '../../../core'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from '../../../lib/AssistantStream'; +import * as ThreadsAPI from './threads'; +import * as AssistantsAPI from '../assistants'; +import * as MessagesAPI from './messages'; +import * as RunsAPI from 
'./runs/runs'; +import { Stream } from '../../../streaming'; export class Threads extends APIResource { runs: RunsAPI.Runs = new RunsAPI.Runs(this._client); diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 3ccdd0108..94f185c0f 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -1,15 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import { sleep } from 'openai/core'; -import { Uploadable } from 'openai/core'; -import { allSettledWithThrow } from 'openai/lib/Util'; -import * as FileBatchesAPI from 'openai/resources/beta/vector-stores/file-batches'; -import * as FilesAPI from 'openai/resources/beta/vector-stores/files'; -import { VectorStoreFilesPage } from 'openai/resources/beta/vector-stores/files'; -import { type CursorPageParams } from 'openai/pagination'; +import * as Core from '../../../core'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import { sleep } from '../../../core'; +import { Uploadable } from '../../../core'; +import { allSettledWithThrow } from '../../../lib/Util'; +import * as FileBatchesAPI from './file-batches'; +import * as FilesAPI from './files'; +import { VectorStoreFilesPage } from './files'; +import { type CursorPageParams } from '../../../pagination'; export class FileBatches extends APIResource { /** diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index ff5094065..0082ee5fa 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -1,11 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import { sleep, Uploadable } from 'openai/core'; -import * as FilesAPI from 'openai/resources/beta/vector-stores/files'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import * as Core from '../../../core'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import { sleep, Uploadable } from '../../../core'; +import * as FilesAPI from './files'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Files extends APIResource { /** diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 0409f3af7..3f5df1fc5 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -1,12 +1,12 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as VectorStoresAPI from 'openai/resources/beta/vector-stores/vector-stores'; -import * as FileBatchesAPI from 'openai/resources/beta/vector-stores/file-batches'; -import * as FilesAPI from 'openai/resources/beta/vector-stores/files'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import * as Core from '../../../core'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as VectorStoresAPI from './vector-stores'; +import * as FileBatchesAPI from './file-batches'; +import * as FilesAPI from './files'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; export class VectorStores extends APIResource { files: FilesAPI.Files = new FilesAPI.Files(this._client); diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 925401fe1..da4e90d42 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from 'openai/resource'; -import * as ChatAPI from 'openai/resources/chat/chat'; -import * as CompletionsAPI from 'openai/resources/chat/completions'; +import { APIResource } from '../../resource'; +import * as ChatAPI from './chat'; +import * as CompletionsAPI from './completions'; export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 1098499b9..07b75debe 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1,13 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIPromise } from 'openai/core'; -import { APIResource } from 'openai/resource'; -import * as ChatCompletionsAPI from 'openai/resources/chat/completions'; -import * as CompletionsAPI from 'openai/resources/completions'; -import * as Shared from 'openai/resources/shared'; -import * as ChatAPI from 'openai/resources/chat/chat'; -import { Stream } from 'openai/streaming'; +import * as Core from '../../core'; +import { APIPromise } from '../../core'; +import { APIResource } from '../../resource'; +import * as ChatCompletionsAPI from './completions'; +import * as CompletionsAPI from '../completions'; +import * as Shared from '../shared'; +import * as ChatAPI from './chat'; +import { Stream } from '../../streaming'; export class Completions extends APIResource { /** diff --git a/src/resources/completions.ts b/src/resources/completions.ts index c37c6d802..26bf5ca0d 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -1,11 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIPromise } from 'openai/core'; -import { APIResource } from 'openai/resource'; -import * as CompletionsAPI from 'openai/resources/completions'; -import * as ChatCompletionsAPI from 'openai/resources/chat/completions'; -import { Stream } from 'openai/streaming'; +import * as Core from '../core'; +import { APIPromise } from '../core'; +import { APIResource } from '../resource'; +import * as CompletionsAPI from './completions'; +import * as ChatCompletionsAPI from './chat/completions'; +import { Stream } from '../streaming'; export class Completions extends APIResource { /** diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 208ceb240..28c954711 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import * as EmbeddingsAPI from 'openai/resources/embeddings'; +import * as Core from '../core'; +import { APIResource } from '../resource'; +import * as EmbeddingsAPI from './embeddings'; export class Embeddings extends APIResource { /** diff --git a/src/resources/files.ts b/src/resources/files.ts index 5d284a071..4c4030dbe 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -1,14 +1,14 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import { type Response } from 'openai/_shims/index'; -import { sleep } from 'openai/core'; -import { APIConnectionTimeoutError } from 'openai/error'; -import * as FilesAPI from 'openai/resources/files'; -import { type Uploadable, multipartFormRequestOptions } from 'openai/core'; -import { Page } from 'openai/pagination'; +import * as Core from '../core'; +import { APIResource } from '../resource'; +import { isRequestOptions } from '../core'; +import { type Response } from '../_shims/index'; +import { sleep } from '../core'; +import { APIConnectionTimeoutError } from '../error'; +import * as FilesAPI from './files'; +import { type Uploadable, multipartFormRequestOptions } from '../core'; +import { Page } from '../pagination'; export class Files extends APIResource { /** diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index c8d688b0c..b1ba34ecf 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from 'openai/resource'; -import * as JobsAPI from 'openai/resources/fine-tuning/jobs/jobs'; +import { APIResource } from '../../resource'; +import * as JobsAPI from './jobs/jobs'; export class FineTuning extends APIResource { jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts index 468cb3001..0e3cdeb79 100644 --- a/src/resources/fine-tuning/jobs/checkpoints.ts +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as CheckpointsAPI from 'openai/resources/fine-tuning/jobs/checkpoints'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import * as Core from '../../../core'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as CheckpointsAPI from './checkpoints'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Checkpoints extends APIResource { /** diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 874d30047..403e0069f 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -1,11 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import { isRequestOptions } from 'openai/core'; -import * as JobsAPI from 'openai/resources/fine-tuning/jobs/jobs'; -import * as CheckpointsAPI from 'openai/resources/fine-tuning/jobs/checkpoints'; -import { CursorPage, type CursorPageParams } from 'openai/pagination'; +import * as Core from '../../../core'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as JobsAPI from './jobs'; +import * as CheckpointsAPI from './checkpoints'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Jobs extends APIResource { checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client); diff --git a/src/resources/images.ts b/src/resources/images.ts index 95f0b6ff2..337909578 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import * as ImagesAPI from 'openai/resources/images'; -import { type Uploadable, multipartFormRequestOptions } from 'openai/core'; +import * as Core from '../core'; +import { APIResource } from '../resource'; +import * as ImagesAPI from './images'; +import { type Uploadable, multipartFormRequestOptions } from '../core'; export class Images extends APIResource { /** diff --git a/src/resources/models.ts b/src/resources/models.ts index 4d5bc57e9..1d94c6c55 100644 --- a/src/resources/models.ts +++ b/src/resources/models.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import * as ModelsAPI from 'openai/resources/models'; -import { Page } from 'openai/pagination'; +import * as Core from '../core'; +import { APIResource } from '../resource'; +import * as ModelsAPI from './models'; +import { Page } from '../pagination'; export class Models extends APIResource { /** diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index b9b9d7fc6..c018f65e7 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from 'openai/core'; -import { APIResource } from 'openai/resource'; -import * as ModerationsAPI from 'openai/resources/moderations'; +import * as Core from '../core'; +import { APIResource } from '../resource'; +import * as ModerationsAPI from './moderations'; export class Moderations extends APIResource { /** From 3f4b74387c0f1619b4f19e47e89bc1cdf403676b Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 13 May 2024 15:01:58 -0400 Subject: [PATCH 118/533] release: 4.46.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 492591f96..561a60e0a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.46.0" + ".": "4.46.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e35a5974..a9ac6efcf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.46.1 (2024-05-13) + +Full Changelog: [v4.46.0...v4.46.1](https://github.com/openai/openai-node/compare/v4.46.0...v4.46.1) + +### Refactors + +* change import paths to be relative 
([#843](https://github.com/openai/openai-node/issues/843)) ([7913574](https://github.com/openai/openai-node/commit/7913574bdb6fcbcf68e56e8def351add6c43310a)) + ## 4.46.0 (2024-05-13) Full Changelog: [v4.45.0...v4.46.0](https://github.com/openai/openai-node/compare/v4.45.0...v4.46.0) diff --git a/README.md b/README.md index dd5f4c278..5d0ac940e 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.46.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.46.1/mod.ts'; ``` diff --git a/package.json b/package.json index b2fa486f0..544c8b346 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.46.0", + "version": "4.46.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 7203ae1fe..9ba19ef80 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.46.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.46.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index ac125a286..c207da397 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.46.0'; // x-release-please-version +export const VERSION = '4.46.1'; // x-release-please-version From 8701aa6d5e0c8f9c4c6c5b6f748cf1947c922426 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 14 May 2024 07:29:11 -0400 Subject: [PATCH 119/533] feat(api): add incomplete state (#846) --- .stats.yml | 2 +- src/resources/batches.ts | 9 +++-- src/resources/beta/assistants.ts | 15 ++++---- src/resources/beta/threads/runs/runs.ts | 35 
+++++++++++-------- src/resources/beta/threads/threads.ts | 20 ++++++----- .../beta/vector-stores/file-batches.ts | 1 + src/resources/files.ts | 18 ++++++---- 7 files changed, 61 insertions(+), 39 deletions(-) diff --git a/.stats.yml b/.stats.yml index f44b9b46a..2e5c705a0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-47007cc1aa5bc7b74107a99b377925978a0bd376ed67bdae724e80d5d0b63d57.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml diff --git a/src/resources/batches.ts b/src/resources/batches.ts index ce04dd57b..399c931e1 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -215,9 +215,11 @@ export interface BatchCreateParams { /** * The endpoint to be used for all requests in the batch. Currently - * `/v1/chat/completions` and `/v1/embeddings` are supported. + * `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + * Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 + * embedding inputs across all requests in the batch. */ - endpoint: '/v1/chat/completions' | '/v1/embeddings'; + endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; /** * The ID of an uploaded file that contains requests for the new batch. @@ -227,7 +229,8 @@ export interface BatchCreateParams { * * Your input file must be formatted as a * [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), - * and must be uploaded with the purpose `batch`. + * and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + * requests, and can be up to 100 MB in size. 
*/ input_file_id: string; diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 719054365..120e63773 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -144,8 +144,9 @@ export interface Assistant { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1047,8 +1048,9 @@ export interface AssistantCreateParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1193,8 +1195,9 @@ export interface AssistantUpdateParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
* * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 715750604..9e44ccfe5 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -176,6 +176,7 @@ export class Runs extends APIResource { break; //We return the run in any terminal state. case 'requires_action': + case 'incomplete': case 'cancelled': case 'completed': case 'failed': @@ -409,8 +410,9 @@ export interface Run { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -432,8 +434,8 @@ export interface Run { /** * The status of the run, which can be either `queued`, `in_progress`, - * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or - * `expired`. + * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + * `incomplete`, or `expired`. */ status: RunStatus; @@ -584,8 +586,8 @@ export namespace Run { /** * The status of the run, which can be either `queued`, `in_progress`, - * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or - * `expired`. + * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + * `incomplete`, or `expired`. 
*/ export type RunStatus = | 'queued' @@ -595,6 +597,7 @@ export type RunStatus = | 'cancelled' | 'failed' | 'completed' + | 'incomplete' | 'expired'; export type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStreaming; @@ -684,8 +687,9 @@ export interface RunCreateParamsBase { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -945,8 +949,9 @@ export interface RunCreateAndPollParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1152,8 +1157,9 @@ export interface RunCreateAndStreamParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
* * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1359,8 +1365,9 @@ export interface RunStreamParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 662dcd09f..63dd815e7 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -130,8 +130,9 @@ export interface AssistantResponseFormat { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -516,8 +517,9 @@ export interface ThreadCreateAndRunParamsBase { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -875,8 +877,9 @@ export interface ThreadCreateAndRunPollParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. @@ -1206,8 +1209,9 @@ export interface ThreadCreateAndRunStreamParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. 
diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 94f185c0f..65738cca6 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -138,6 +138,7 @@ export class FileBatches extends APIResource { await sleep(sleepInterval); break; case 'failed': + case 'cancelled': case 'completed': return batch; } diff --git a/src/resources/files.ts b/src/resources/files.ts index 4c4030dbe..de5067d04 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -12,14 +12,18 @@ import { Page } from '../pagination'; export class Files extends APIResource { /** - * Upload a file that can be used across various endpoints. The size of all the - * files uploaded by one organization can be up to 100 GB. + * Upload a file that can be used across various endpoints. Individual files can be + * up to 512 MB, and the size of all files uploaded by one organization can be up + * to 100 GB. * - * The size of individual files can be a maximum of 512 MB or 2 million tokens for - * Assistants. See the - * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to - * learn more about the types of files supported. The Fine-tuning API only supports - * `.jsonl` files. + * The Assistants API supports files up to 2 million tokens and of specific file + * types. See the + * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for + * details. + * + * The Fine-tuning API only supports `.jsonl` files. + * + * The Batch API only supports `.jsonl` files up to 100 MB in size. * * Please [contact us](https://help.openai.com/) if you need to increase these * storage limits. 
From 2b1bc88639f327053a16fae0d2a5edbd006dd6a3 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 14 May 2024 07:29:32 -0400 Subject: [PATCH 120/533] release: 4.47.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 561a60e0a..dfd38477d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.46.1" + ".": "4.47.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index a9ac6efcf..af4227fdc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.47.0 (2024-05-14) + +Full Changelog: [v4.46.1...v4.47.0](https://github.com/openai/openai-node/compare/v4.46.1...v4.47.0) + +### Features + +* **api:** add incomplete state ([#846](https://github.com/openai/openai-node/issues/846)) ([5f663a1](https://github.com/openai/openai-node/commit/5f663a167361b905c6d0c1242e8a78037a7e4a57)) + ## 4.46.1 (2024-05-13) Full Changelog: [v4.46.0...v4.46.1](https://github.com/openai/openai-node/compare/v4.46.0...v4.46.1) diff --git a/README.md b/README.md index 5d0ac940e..f88c4929d 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.46.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.47.0/mod.ts'; ``` diff --git a/package.json b/package.json index 544c8b346..03f3e2188 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.46.1", + "version": "4.47.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 9ba19ef80..427a93c47 100755 --- a/scripts/build-deno +++ 
b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.46.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.47.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index c207da397..1fa9d58ab 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.46.1'; // x-release-please-version +export const VERSION = '4.47.0'; // x-release-please-version From 139e690546775b3568934dd990dd329fce2fbc2f Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 14 May 2024 10:46:22 -0400 Subject: [PATCH 121/533] chore(internal): add slightly better logging to scripts (#848) --- .github/workflows/ci.yml | 6 ++---- scripts/format | 8 ++++++++ scripts/lint | 1 + scripts/test | 1 - 4 files changed, 11 insertions(+), 5 deletions(-) create mode 100755 scripts/format diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d2a8037a3..a55376f66 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,12 +22,10 @@ jobs: node-version: '18' - name: Install dependencies - run: | - yarn install + run: yarn install - name: Check types - run: | - yarn build + run: ./scripts/lint test: name: test runs-on: ubuntu-latest diff --git a/scripts/format b/scripts/format new file mode 100755 index 000000000..d297e762f --- /dev/null +++ b/scripts/format @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running eslint --fix" +./node_modules/.bin/eslint --fix --ext ts,js . diff --git a/scripts/lint b/scripts/lint index 4f05d6609..6b0e5dc3e 100755 --- a/scripts/lint +++ b/scripts/lint @@ -4,4 +4,5 @@ set -e cd "$(dirname "$0")/.." +echo "==> Running eslint" ./node_modules/.bin/eslint --ext ts,js . 
diff --git a/scripts/test b/scripts/test index b62a7cccd..2049e31b0 100755 --- a/scripts/test +++ b/scripts/test @@ -52,6 +52,5 @@ else echo fi -# Run tests echo "==> Running tests" ./node_modules/.bin/jest "$@" From 7376041eb470b6d8db0229d4336eda5a658b66e0 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 14 May 2024 10:46:43 -0400 Subject: [PATCH 122/533] release: 4.47.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index dfd38477d..9516f2682 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.47.0" + ".": "4.47.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index af4227fdc..f239c2921 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.47.1 (2024-05-14) + +Full Changelog: [v4.47.0...v4.47.1](https://github.com/openai/openai-node/compare/v4.47.0...v4.47.1) + +### Chores + +* **internal:** add slightly better logging to scripts ([#848](https://github.com/openai/openai-node/issues/848)) ([139e690](https://github.com/openai/openai-node/commit/139e690546775b3568934dd990dd329fce2fbc2f)) + ## 4.47.0 (2024-05-14) Full Changelog: [v4.46.1...v4.47.0](https://github.com/openai/openai-node/compare/v4.46.1...v4.47.0) diff --git a/README.md b/README.md index f88c4929d..693654151 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.47.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.47.1/mod.ts'; ``` diff --git a/package.json b/package.json index 03f3e2188..9c385f41c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.47.0", + "version": "4.47.1", 
"description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 427a93c47..cd3be37fa 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.47.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.47.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 1fa9d58ab..710b8f27e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.47.0'; // x-release-please-version +export const VERSION = '4.47.1'; // x-release-please-version From 5828fc0c991302badc8d31973ed77dfe7e2be339 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Tue, 28 May 2024 20:24:50 +0100 Subject: [PATCH 123/533] docs(readme): add bundle size badge (#869) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 693654151..6a2239f1f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # OpenAI Node API Library -[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) +[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) This library provides convenient access to the OpenAI REST API from TypeScript or JavaScript. 
From ed4219a565976750637d8c68b2b35409aca447af Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 28 May 2024 19:25:12 +0000 Subject: [PATCH 124/533] release: 4.47.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9516f2682..c7928e176 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.47.1" + ".": "4.47.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f239c2921..79d67fcd8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.47.2 (2024-05-28) + +Full Changelog: [v4.47.1...v4.47.2](https://github.com/openai/openai-node/compare/v4.47.1...v4.47.2) + +### Documentation + +* **readme:** add bundle size badge ([#869](https://github.com/openai/openai-node/issues/869)) ([e252132](https://github.com/openai/openai-node/commit/e2521327b7b4f5abe97e4c58c417b37d00079ef8)) + ## 4.47.1 (2024-05-14) Full Changelog: [v4.47.0...v4.47.1](https://github.com/openai/openai-node/compare/v4.47.0...v4.47.1) diff --git a/README.md b/README.md index 6a2239f1f..b5feb43f3 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.47.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.47.2/mod.ts'; ``` diff --git a/package.json b/package.json index 9c385f41c..217da8bd3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.47.1", + "version": "4.47.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index cd3be37fa..7b9374217 100755 --- 
a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.47.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.47.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 710b8f27e..b343abcea 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.47.1'; // x-release-please-version +export const VERSION = '4.47.2'; // x-release-please-version From a70f641ea0e3bc4668fd3803f7a1968665df3d7c Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Thu, 30 May 2024 13:16:04 -0700 Subject: [PATCH 125/533] [Azure] Update Batch API (#871) --- src/index.ts | 1 - tests/lib/azure.test.ts | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/index.ts b/src/index.ts index 854536161..fdafabf3d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -492,7 +492,6 @@ const _deployments_endpoints = new Set([ '/audio/translations', '/audio/speech', '/images/generations', - '/batches', ]); const API_KEY_SENTINEL = ''; diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index 32b59ae33..06ca1d464 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -290,7 +290,7 @@ describe('azure request building', () => { fetch: testFetch, }); - test('handles Batch', async () => { + test('handles batch', async () => { expect( await client.batches.create({ completion_window: '24h', @@ -298,7 +298,7 @@ describe('azure request building', () => { input_file_id: 'file-id', }), ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/batches?api-version=${apiVersion}`, + url: `https://example.com/openai/batches?api-version=${apiVersion}`, }); }); @@ -423,7 +423,7 @@ describe('azure request building', () => { fetch: testFetch, }); - test('Batch is not handled', async () => { + 
test('handles batch', async () => { expect( await client.batches.create({ completion_window: '24h', From 0152859aa0ece3374ecfe72161069ba127f08f93 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Thu, 30 May 2024 13:16:38 -0700 Subject: [PATCH 126/533] docs(azure): update example and readme to use Entra ID (#857) --- README.md | 11 +++++++++-- examples/azure.ts | 11 ++++++++--- examples/package.json | 3 ++- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index b5feb43f3..89601dcce 100644 --- a/README.md +++ b/README.md @@ -367,11 +367,18 @@ To use this library with [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ class instead of the `OpenAI` class. > [!IMPORTANT] -> The Azure API shape differs from the core API shape which means that the static types for responses / params +> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params > won't always be correct. ```ts -const openai = new AzureOpenAI(); +import { AzureOpenAI } from 'openai'; +import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; + +const credential = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const azureADTokenProvider = getBearerTokenProvider(credential, scope); + +const openai = new AzureOpenAI({ azureADTokenProvider }); const result = await openai.chat.completions.create({ model: 'gpt-4-1106-preview', diff --git a/examples/azure.ts b/examples/azure.ts index 7f57e45c3..5fe1718fa 100755 --- a/examples/azure.ts +++ b/examples/azure.ts @@ -1,14 +1,19 @@ #!/usr/bin/env -S npm run tsn -T import { AzureOpenAI } from 'openai'; +import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; // Corresponds to your Model deployment within your OpenAI resource, e.g. gpt-4-1106-preview // Navigate to the Azure OpenAI Studio to deploy a model. 
const deployment = 'gpt-4-1106-preview'; -// Make sure to set both AZURE_OPENAI_ENDPOINT with the endpoint of your Azure resource and AZURE_OPENAI_API_KEY with the API key. -// You can find both information in the Azure Portal. -const openai = new AzureOpenAI(); +const credential = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const azureADTokenProvider = getBearerTokenProvider(credential, scope); + +// Make sure to set AZURE_OPENAI_ENDPOINT with the endpoint of your Azure resource. +// You can find it in the Azure Portal. +const openai = new AzureOpenAI({ azureADTokenProvider }); async function main() { console.log('Non-streaming:'); diff --git a/examples/package.json b/examples/package.json index 04ed507b9..c8a5f7087 100644 --- a/examples/package.json +++ b/examples/package.json @@ -9,7 +9,8 @@ "express": "^4.18.2", "next": "^14.1.1", "openai": "file:..", - "zod-to-json-schema": "^3.21.4" + "zod-to-json-schema": "^3.21.4", + "@azure/identity": "^4.2.0" }, "devDependencies": { "@types/body-parser": "^1.19.3", From f2fa17d9f4cb202c4abaaf463e2c8a5589ad4320 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 31 May 2024 18:12:01 +0100 Subject: [PATCH 127/533] fix: allow git imports for pnpm (#873) --- package.json | 1 - 1 file changed, 1 deletion(-) diff --git a/package.json b/package.json index 217da8bd3..777aa72e5 100644 --- a/package.json +++ b/package.json @@ -16,7 +16,6 @@ "scripts": { "test": "./scripts/test", "build": "./scripts/build", - "prepack": "echo 'to pack, run yarn build && (cd dist; yarn pack)' && exit 1", "prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1", "format": "prettier --write --cache --cache-strategy metadata . 
!dist", "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build; fi", From fd70373450d6c39ff55d984a2ff13ea7a7df23d1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 31 May 2024 17:12:22 +0000 Subject: [PATCH 128/533] release: 4.47.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c7928e176..066d588c5 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.47.2" + ".": "4.47.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 79d67fcd8..47dba2105 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.47.3 (2024-05-31) + +Full Changelog: [v4.47.2...v4.47.3](https://github.com/openai/openai-node/compare/v4.47.2...v4.47.3) + +### Bug Fixes + +* allow git imports for pnpm ([#873](https://github.com/openai/openai-node/issues/873)) ([9da9809](https://github.com/openai/openai-node/commit/9da98090e80cbe988a3d695e4c9b57439080ec3e)) + + +### Documentation + +* **azure:** update example and readme to use Entra ID ([#857](https://github.com/openai/openai-node/issues/857)) ([722eff1](https://github.com/openai/openai-node/commit/722eff1a7aeaa2ce3c40301709db61258c9afa16)) + ## 4.47.2 (2024-05-28) Full Changelog: [v4.47.1...v4.47.2](https://github.com/openai/openai-node/compare/v4.47.1...v4.47.2) diff --git a/README.md b/README.md index 89601dcce..7b96d5188 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.47.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.47.3/mod.ts'; ``` diff --git a/package.json b/package.json index 777aa72e5..d0587b05f 100644 --- 
a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.47.2", + "version": "4.47.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 7b9374217..55ddd2120 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.47.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.47.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index b343abcea..7b9e87774 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.47.2'; // x-release-please-version +export const VERSION = '4.47.3'; // x-release-please-version From 7b857feb5ef61efd4c1068cd36d63aae34365ef1 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:55:10 +0100 Subject: [PATCH 129/533] feat(api): updates (#874) --- .stats.yml | 2 +- src/resources/batches.ts | 6 +- src/resources/beta/assistants.ts | 99 +++++++++++++++++++ src/resources/beta/threads/threads.ts | 90 +++++++++++++++++ .../beta/vector-stores/file-batches.ts | 47 +++++++++ src/resources/beta/vector-stores/files.ts | 90 +++++++++++++++++ .../beta/vector-stores/vector-stores.ts | 43 ++++++++ src/resources/chat/completions.ts | 6 +- src/resources/files.ts | 12 ++- src/resources/fine-tuning/jobs/jobs.ts | 5 + src/resources/shared.ts | 8 +- tests/api-resources/beta/assistants.test.ts | 4 +- .../beta/threads/threads.test.ts | 12 ++- .../beta/vector-stores/file-batches.test.ts | 5 +- .../beta/vector-stores/files.test.ts | 5 +- 15 files changed, 416 insertions(+), 18 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2e5c705a0..11d2b0b18 100644 --- a/.stats.yml +++ b/.stats.yml @@ 
-1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml diff --git a/src/resources/batches.ts b/src/resources/batches.ts index 399c931e1..d23c059dc 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -37,7 +37,9 @@ export class Batches extends APIResource { } /** - * Cancels an in-progress batch. + * Cancels an in-progress batch. The batch will be in status `cancelling` for up to + * 10 minutes, before changing to `cancelled`, where it will have partial results + * (if any) available in the output file. */ cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.post(`/batches/${batchId}/cancel`, options); @@ -228,7 +230,7 @@ export interface BatchCreateParams { * for how to upload a file. * * Your input file must be formatted as a - * [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), + * [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), * and must be uploaded with the purpose `batch`. The file can contain up to 50,000 * requests, and can be up to 100 MB in size. 
*/ diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 120e63773..cdea09266 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -258,6 +258,7 @@ export type AssistantStreamEvent = | AssistantStreamEvent.ThreadRunInProgress | AssistantStreamEvent.ThreadRunRequiresAction | AssistantStreamEvent.ThreadRunCompleted + | AssistantStreamEvent.ThreadRunIncomplete | AssistantStreamEvent.ThreadRunFailed | AssistantStreamEvent.ThreadRunCancelling | AssistantStreamEvent.ThreadRunCancelled @@ -362,6 +363,20 @@ export namespace AssistantStreamEvent { event: 'thread.run.completed'; } + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * ends with status `incomplete`. + */ + export interface ThreadRunIncomplete { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.incomplete'; + } + /** * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) * fails. @@ -618,6 +633,30 @@ export interface FileSearchTool { * The type of tool being defined: `file_search` */ type: 'file_search'; + + /** + * Overrides for the file search tool. + */ + file_search?: FileSearchTool.FileSearch; +} + +export namespace FileSearchTool { + /** + * Overrides for the file search tool. + */ + export interface FileSearch { + /** + * The maximum number of results the file search tool should output. The default is + * 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1 + * and 50 inclusive. + * + * Note that the file search tool may output fewer than `max_num_results` results. + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + * for more information. 
+ */ + max_num_results?: number; + } } export interface FunctionTool { @@ -843,6 +882,7 @@ export type RunStreamEvent = | RunStreamEvent.ThreadRunInProgress | RunStreamEvent.ThreadRunRequiresAction | RunStreamEvent.ThreadRunCompleted + | RunStreamEvent.ThreadRunIncomplete | RunStreamEvent.ThreadRunFailed | RunStreamEvent.ThreadRunCancelling | RunStreamEvent.ThreadRunCancelled @@ -919,6 +959,20 @@ export namespace RunStreamEvent { event: 'thread.run.completed'; } + /** + * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + * ends with status `incomplete`. + */ + export interface ThreadRunIncomplete { + /** + * Represents an execution run on a + * [thread](https://platform.openai.com/docs/api-reference/threads). + */ + data: RunsAPI.Run; + + event: 'thread.run.incomplete'; + } + /** * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) * fails. @@ -1140,6 +1194,12 @@ export namespace AssistantCreateParams { export namespace FileSearch { export interface VectorStore { + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: VectorStore.Auto | VectorStore.Static; + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to * add to the vector store. There can be a maximum of 10000 files in a vector @@ -1155,6 +1215,45 @@ export namespace AssistantCreateParams { */ metadata?: unknown; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. 
+ * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 63dd815e7..9d27b0328 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -369,6 +369,12 @@ export namespace ThreadCreateParams { export namespace FileSearch { export interface VectorStore { + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: VectorStore.Auto | VectorStore.Static; + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to * add to the vector store. There can be a maximum of 10000 files in a vector @@ -384,6 +390,45 @@ export namespace ThreadCreateParams { */ metadata?: unknown; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. 
+ */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -711,6 +756,12 @@ export namespace ThreadCreateAndRunParams { export namespace FileSearch { export interface VectorStore { + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: VectorStore.Auto | VectorStore.Static; + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to * add to the vector store. There can be a maximum of 10000 files in a vector @@ -726,6 +777,45 @@ export namespace ThreadCreateAndRunParams { */ metadata?: unknown; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 65738cca6..2483e984f 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -261,6 +261,53 @@ export interface FileBatchCreateParams { * files. */ file_ids: Array; + + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. 
+ */ + chunking_strategy?: + | FileBatchCreateParams.AutoChunkingStrategyRequestParam + | FileBatchCreateParams.StaticChunkingStrategyRequestParam; +} + +export namespace FileBatchCreateParams { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface AutoChunkingStrategyRequestParam { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface StaticChunkingStrategyRequestParam { + static: StaticChunkingStrategyRequestParam.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace StaticChunkingStrategyRequestParam { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } } export interface FileBatchListFilesParams extends CursorPageParams { diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index 0082ee5fa..04a0413be 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -217,6 +217,11 @@ export interface VectorStoreFile { * attached to. */ vector_store_id: string; + + /** + * The strategy used to chunk the file. + */ + chunking_strategy?: VectorStoreFile.Static | VectorStoreFile.Other; } export namespace VectorStoreFile { @@ -235,6 +240,44 @@ export namespace VectorStoreFile { */ message: string; } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. 
+ * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + + /** + * This is returned when the chunking strategy is unknown. Typically, this is + * because the file was indexed before the `chunking_strategy` concept was + * introduced in the API. + */ + export interface Other { + /** + * Always `other`. + */ + type: 'other'; + } } export interface VectorStoreFileDeleted { @@ -252,6 +295,53 @@ export interface FileCreateParams { * files. */ file_id: string; + + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. + */ + chunking_strategy?: + | FileCreateParams.AutoChunkingStrategyRequestParam + | FileCreateParams.StaticChunkingStrategyRequestParam; +} + +export namespace FileCreateParams { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface AutoChunkingStrategyRequestParam { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface StaticChunkingStrategyRequestParam { + static: StaticChunkingStrategyRequestParam.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace StaticChunkingStrategyRequestParam { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. 
+ */ + max_chunk_size_tokens: number; + } + } } export interface FileListParams extends CursorPageParams { diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 3f5df1fc5..d2d4c7d39 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -200,6 +200,12 @@ export interface VectorStoreDeleted { } export interface VectorStoreCreateParams { + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. Only applicable if `file_ids` is non-empty. + */ + chunking_strategy?: VectorStoreCreateParams.Auto | VectorStoreCreateParams.Static; + /** * The expiration policy for a vector store. */ @@ -227,6 +233,43 @@ export interface VectorStoreCreateParams { } export namespace VectorStoreCreateParams { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + /** * The expiration policy for a vector store. 
*/ diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 07b75debe..cbf7bcc2c 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -137,7 +137,7 @@ export interface ChatCompletionAssistantMessageParam { * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of * a function that should be called, as generated by the model. */ - function_call?: ChatCompletionAssistantMessageParam.FunctionCall; + function_call?: ChatCompletionAssistantMessageParam.FunctionCall | null; /** * An optional name for the participant. Provides the model information to @@ -885,8 +885,8 @@ export namespace ChatCompletionCreateParams { /** * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - * for examples, and the + * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + * and the * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for * documentation about the format. * diff --git a/src/resources/files.ts b/src/resources/files.ts index de5067d04..d86dd9972 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -21,9 +21,15 @@ export class Files extends APIResource { * [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for * details. * - * The Fine-tuning API only supports `.jsonl` files. + * The Fine-tuning API only supports `.jsonl` files. The input also has certain + * required formats for fine-tuning + * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + * models. * - * The Batch API only supports `.jsonl` files up to 100 MB in size. + * The Batch API only supports `.jsonl` files up to 100 MB in size. 
The input also + * has a specific required + * [format](https://platform.openai.com/docs/api-reference/batch/request-input). * * Please [contact us](https://help.openai.com/) if you need to increase these * storage limits. @@ -194,7 +200,7 @@ export interface FileCreateParams { * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). */ - purpose: 'assistants' | 'batch' | 'fine-tune'; + purpose: 'assistants' | 'batch' | 'fine-tune' | 'vision'; } export interface FileListParams { diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 403e0069f..12990c6fc 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -312,6 +312,11 @@ export interface JobCreateParams { * Your dataset must be formatted as a JSONL file. Additionally, you must upload * your file with the purpose `fine-tune`. * + * The contents of the file should differ depending on if the model uses the + * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + * format. + * * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) * for more details. */ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 93fa05fa4..45969ea65 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -25,8 +25,8 @@ export interface FunctionDefinition { /** * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - * for examples, and the + * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + * and the * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for * documentation about the format. 
* @@ -37,8 +37,8 @@ export interface FunctionDefinition { /** * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) - * for examples, and the + * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + * and the * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for * documentation about the format. * diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index 56ce8446a..4049f09b3 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -33,7 +33,9 @@ describe('resource assistants', () => { code_interpreter: { file_ids: ['string', 'string', 'string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + vector_stores: [ + { file_ids: ['string', 'string', 'string'], chunking_strategy: { type: 'auto' }, metadata: {} }, + ], }, }, tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 4c4256258..ebc78f357 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -132,7 +132,13 @@ describe('resource threads', () => { code_interpreter: { file_ids: ['string', 'string', 'string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + vector_stores: [ + { + file_ids: ['string', 'string', 'string'], + chunking_strategy: { type: 'auto' }, + metadata: {}, + }, + ], }, }, }, @@ -310,7 +316,9 @@ describe('resource threads', () => { code_interpreter: { file_ids: ['string', 'string', 'string'] }, file_search: { vector_store_ids: ['string'], - 
vector_stores: [{ file_ids: ['string', 'string', 'string'], metadata: {} }], + vector_stores: [ + { file_ids: ['string', 'string', 'string'], chunking_strategy: { type: 'auto' }, metadata: {} }, + ], }, }, metadata: {}, diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/beta/vector-stores/file-batches.test.ts index 782b33a0c..b8ff697b7 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/beta/vector-stores/file-batches.test.ts @@ -23,7 +23,10 @@ describe('resource fileBatches', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] }); + const response = await openai.beta.vectorStores.fileBatches.create('vs_abc123', { + file_ids: ['string'], + chunking_strategy: { type: 'auto' }, + }); }); test('retrieve', async () => { diff --git a/tests/api-resources/beta/vector-stores/files.test.ts b/tests/api-resources/beta/vector-stores/files.test.ts index 03340753c..60906dac3 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/beta/vector-stores/files.test.ts @@ -21,7 +21,10 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'string' }); + const response = await openai.beta.vectorStores.files.create('vs_abc123', { + file_id: 'string', + chunking_strategy: { type: 'auto' }, + }); }); test('retrieve', async () => { From dba4ffb68162ffa6432d8341213ce70ec54c0ab8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 3 Jun 2024 22:55:32 +0000 Subject: [PATCH 130/533] release: 4.48.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 
files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 066d588c5..79313ca57 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.47.3" + ".": "4.48.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 47dba2105..665ac0b11 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.48.0 (2024-06-03) + +Full Changelog: [v4.47.3...v4.48.0](https://github.com/openai/openai-node/compare/v4.47.3...v4.48.0) + +### Features + +* **api:** updates ([#874](https://github.com/openai/openai-node/issues/874)) ([295c248](https://github.com/openai/openai-node/commit/295c2486005f6f1eb81cbbd6994b4382801d0707)) + ## 4.47.3 (2024-05-31) Full Changelog: [v4.47.2...v4.47.3](https://github.com/openai/openai-node/compare/v4.47.2...v4.47.3) diff --git a/README.md b/README.md index 7b96d5188..e71459c97 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.47.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.48.0/mod.ts'; ``` diff --git a/package.json b/package.json index d0587b05f..b666d2f6f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.47.3", + "version": "4.48.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 55ddd2120..2e3f7a94c 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.47.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.48.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 7b9e87774..b09f38344 
100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.47.3'; // x-release-please-version +export const VERSION = '4.48.0'; // x-release-please-version From 08634c26ea11d31e66421fba1171985601b9e633 Mon Sep 17 00:00:00 2001 From: meorphis Date: Tue, 4 Jun 2024 13:54:23 -0400 Subject: [PATCH 131/533] fix: resolve typescript issue --- src/lib/AbstractChatCompletionRunner.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index 8a8f4670d..5764b85b2 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -232,7 +232,12 @@ export abstract class AbstractChatCompletionRunner< while (i-- > 0) { const message = this.messages[i]; if (isAssistantMessage(message)) { - return { ...message, content: message.content ?? null }; + const { function_call, ...rest } = message; + const ret: ChatCompletionMessage = { ...rest, content: message.content ?? 
null }; + if (function_call) { + ret.function_call = function_call; + } + return ret; } } throw new OpenAIError('stream ended without producing a ChatCompletionMessage with role=assistant'); From 28187045cb18625dd54e0be1414cccfd3d4bfc35 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 4 Jun 2024 18:00:05 +0000 Subject: [PATCH 132/533] release: 4.48.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 79313ca57..94b98d286 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.48.0" + ".": "4.48.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 665ac0b11..7926a5ec1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.48.1 (2024-06-04) + +Full Changelog: [v4.48.0...v4.48.1](https://github.com/openai/openai-node/compare/v4.48.0...v4.48.1) + +### Bug Fixes + +* resolve typescript issue ([1129707](https://github.com/openai/openai-node/commit/11297073b1a370fc9c8676446f939a48071999b2)) + ## 4.48.0 (2024-06-03) Full Changelog: [v4.47.3...v4.48.0](https://github.com/openai/openai-node/compare/v4.47.3...v4.48.0) diff --git a/README.md b/README.md index e71459c97..1e208ffa3 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.48.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.48.1/mod.ts'; ``` diff --git a/package.json b/package.json index b666d2f6f..fbd66f153 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.48.0", + "version": "4.48.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": 
"dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 2e3f7a94c..9f8c154a8 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.48.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.48.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index b09f38344..1e0cc13b8 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.48.0'; // x-release-please-version +export const VERSION = '4.48.1'; // x-release-please-version From db8b6448c348aeb908f7b7d1ac196632d573ae65 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 5 Jun 2024 05:33:40 -0400 Subject: [PATCH 133/533] chore(internal): minor change to tests (#881) --- tests/api-resources/audio/speech.test.ts | 2 +- tests/api-resources/completions.test.ts | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index 18302ce9a..3a7c22a29 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -12,7 +12,7 @@ describe('resource speech', () => { test.skip('create: required and optional params', async () => { const response = await openai.audio.speech.create({ input: 'string', - model: 'string', + model: 'tts-1', voice: 'alloy', response_format: 'mp3', speed: 0.25, diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 3f6792447..3d64a509b 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -10,7 +10,10 @@ const openai = new OpenAI({ describe('resource completions', () => { test('create: only required params', async () => { - const responsePromise 
= openai.completions.create({ model: 'string', prompt: 'This is a test.' }); + const responsePromise = openai.completions.create({ + model: 'gpt-3.5-turbo-instruct', + prompt: 'This is a test.', + }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -22,7 +25,7 @@ describe('resource completions', () => { test('create: required and optional params', async () => { const response = await openai.completions.create({ - model: 'string', + model: 'gpt-3.5-turbo-instruct', prompt: 'This is a test.', best_of: 0, echo: true, From 3530f16bd11b1fba599f23acb450ec6868232b11 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Jun 2024 09:34:00 +0000 Subject: [PATCH 134/533] release: 4.48.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 94b98d286..cc6e3d11b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.48.1" + ".": "4.48.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7926a5ec1..b754e9bef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.48.2 (2024-06-05) + +Full Changelog: [v4.48.1...v4.48.2](https://github.com/openai/openai-node/compare/v4.48.1...v4.48.2) + +### Chores + +* **internal:** minor change to tests ([#881](https://github.com/openai/openai-node/issues/881)) ([5e2d608](https://github.com/openai/openai-node/commit/5e2d608ca9a2bcb3f261ad13c848d327b60b6fb1)) + ## 4.48.1 (2024-06-04) Full Changelog: [v4.48.0...v4.48.1](https://github.com/openai/openai-node/compare/v4.48.0...v4.48.1) diff --git a/README.md b/README.md index 1e208ffa3..cb28494a4 100644 --- a/README.md +++ b/README.md @@ 
-19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.48.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.48.2/mod.ts'; ``` diff --git a/package.json b/package.json index fbd66f153..364044dcf 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.48.1", + "version": "4.48.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 9f8c154a8..8c484ef76 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.48.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.48.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 1e0cc13b8..c96a440fa 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.48.1'; // x-release-please-version +export const VERSION = '4.48.2'; // x-release-please-version From 7b4b9a2d72169c37126ab4c681c5107eacaaf2c7 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 6 Jun 2024 03:39:28 -0400 Subject: [PATCH 135/533] chore(internal): minor refactor of tests (#884) --- tests/api-resources/audio/speech.test.ts | 2 +- tests/api-resources/completions.test.ts | 7 ++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index 3a7c22a29..18302ce9a 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -12,7 +12,7 @@ describe('resource speech', () => { test.skip('create: required and optional params', async () => { const response = await 
openai.audio.speech.create({ input: 'string', - model: 'tts-1', + model: 'string', voice: 'alloy', response_format: 'mp3', speed: 0.25, diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 3d64a509b..3f6792447 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -10,10 +10,7 @@ const openai = new OpenAI({ describe('resource completions', () => { test('create: only required params', async () => { - const responsePromise = openai.completions.create({ - model: 'gpt-3.5-turbo-instruct', - prompt: 'This is a test.', - }); + const responsePromise = openai.completions.create({ model: 'string', prompt: 'This is a test.' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -25,7 +22,7 @@ describe('resource completions', () => { test('create: required and optional params', async () => { const response = await openai.completions.create({ - model: 'gpt-3.5-turbo-instruct', + model: 'string', prompt: 'This is a test.', best_of: 0, echo: true, From c15c635a2366345d8fd79eb1fde7b04a5a46d107 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 07:39:51 +0000 Subject: [PATCH 136/533] release: 4.48.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index cc6e3d11b..b271b7c1e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.48.2" + ".": "4.48.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b754e9bef..1e8d4c8d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.48.3 (2024-06-06) + +Full Changelog: 
[v4.48.2...v4.48.3](https://github.com/openai/openai-node/compare/v4.48.2...v4.48.3) + +### Chores + +* **internal:** minor refactor of tests ([#884](https://github.com/openai/openai-node/issues/884)) ([0b71f2b](https://github.com/openai/openai-node/commit/0b71f2b2cb67e5714476b6f63b4ef93a0140bff2)) + ## 4.48.2 (2024-06-05) Full Changelog: [v4.48.1...v4.48.2](https://github.com/openai/openai-node/compare/v4.48.1...v4.48.2) diff --git a/README.md b/README.md index cb28494a4..0169a18dc 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.48.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.48.3/mod.ts'; ``` diff --git a/package.json b/package.json index 364044dcf..14f7c3d5b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.48.2", + "version": "4.48.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 8c484ef76..fd2612a13 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.48.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.48.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index c96a440fa..0a2f1d907 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.48.2'; // x-release-please-version +export const VERSION = '4.48.3'; // x-release-please-version From ab688c25bbab2651154cae415668332293c814e6 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Thu, 6 Jun 2024 14:55:21 -0400 Subject: [PATCH 137/533] feat(api): updates (#887) --- .stats.yml | 2 +- 
src/resources/beta/threads/runs/runs.ts | 14 ++++++++++++++ src/resources/beta/threads/threads.ts | 7 +++++++ src/resources/chat/completions.ts | 7 +++++++ tests/api-resources/beta/threads/runs/runs.test.ts | 1 + tests/api-resources/beta/threads/threads.test.ts | 1 + tests/api-resources/chat/completions.test.ts | 1 + 7 files changed, 32 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 11d2b0b18..eb81a249f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff436357b12348b7c1c930469332a79cd23ac6ec537e645c411893c42de42e57.yml diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 9e44ccfe5..ed5a5ff68 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -402,6 +402,13 @@ export interface Run { */ object: 'thread.run'; + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + * during tool use. + */ + parallel_tool_calls: boolean; + /** * Details on the action required to continue the run. Will be `null` if no action * is required. @@ -685,6 +692,13 @@ export interface RunCreateParamsBase { | 'gpt-3.5-turbo-16k-0613' | null; + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + * during tool use. + */ + parallel_tool_calls?: boolean; + /** * Specifies the format that the model must output. 
Compatible with * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 9d27b0328..6978e6eb5 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -560,6 +560,13 @@ export interface ThreadCreateAndRunParamsBase { | 'gpt-3.5-turbo-16k-0613' | null; + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + * during tool use. + */ + parallel_tool_calls?: boolean; + /** * Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index cbf7bcc2c..eeaab3d70 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -757,6 +757,13 @@ export interface ChatCompletionCreateParamsBase { */ n?: number | null; + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + * during tool use. + */ + parallel_tool_calls?: boolean; + /** * Number between -2.0 and 2.0. 
Positive values penalize new tokens based on * whether they appear in the text so far, increasing the model's likelihood to diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 3ee6ecb4e..5aba82ff8 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -124,6 +124,7 @@ describe('resource runs', () => { max_prompt_tokens: 256, metadata: {}, model: 'gpt-4-turbo', + parallel_tool_calls: true, response_format: 'none', stream: false, temperature: 1, diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index ebc78f357..85f89533c 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -213,6 +213,7 @@ describe('resource threads', () => { max_prompt_tokens: 256, metadata: {}, model: 'gpt-4-turbo', + parallel_tool_calls: true, response_format: 'none', stream: false, temperature: 1, diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 21277e1d6..9404e9e18 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -34,6 +34,7 @@ describe('resource completions', () => { logprobs: true, max_tokens: 0, n: 1, + parallel_tool_calls: true, presence_penalty: -2, response_format: { type: 'json_object' }, seed: -9223372036854776000, From 24de50b142ea44891b73b0e1bf5ce4f11cf10a95 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 18:55:45 +0000 Subject: [PATCH 138/533] release: 4.49.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json 
b/.release-please-manifest.json index b271b7c1e..999cfc01c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.48.3" + ".": "4.49.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e8d4c8d4..8853010a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.49.0 (2024-06-06) + +Full Changelog: [v4.48.3...v4.49.0](https://github.com/openai/openai-node/compare/v4.48.3...v4.49.0) + +### Features + +* **api:** updates ([#887](https://github.com/openai/openai-node/issues/887)) ([359eeb3](https://github.com/openai/openai-node/commit/359eeb33b08b371451f216d1e21dd3334ec15f36)) + ## 4.48.3 (2024-06-06) Full Changelog: [v4.48.2...v4.48.3](https://github.com/openai/openai-node/compare/v4.48.2...v4.48.3) diff --git a/README.md b/README.md index 0169a18dc..acd36e4d1 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.48.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.49.0/mod.ts'; ``` diff --git a/package.json b/package.json index 14f7c3d5b..5a116918d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.48.3", + "version": "4.49.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index fd2612a13..460fead2b 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.48.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.49.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 0a2f1d907..bb12aad49 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = 
'4.48.3'; // x-release-please-version +export const VERSION = '4.49.0'; // x-release-please-version From 261d356384cdacefd015526dc6a9993bc713ffca Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Fri, 7 Jun 2024 15:40:33 -0400 Subject: [PATCH 139/533] fix: remove erroneous thread create argument (#889) --- .stats.yml | 2 +- src/resources/beta/threads/messages.ts | 22 ++++++++++++++++++++-- src/resources/beta/threads/runs/runs.ts | 15 ++++++++++++--- src/resources/beta/threads/threads.ts | 24 +++++++++++++++++++++--- src/resources/chat/completions.ts | 2 +- 5 files changed, 55 insertions(+), 10 deletions(-) diff --git a/.stats.yml b/.stats.yml index eb81a249f..a6c08f499 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff436357b12348b7c1c930469332a79cd23ac6ec537e645c411893c42de42e57.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c085faf70d6ff059fbe11b7b6b98123a612524cb9b8a6f649c99526e5b0b1bdb.yml diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index a5307edbe..4bbdc7426 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -459,7 +459,16 @@ export namespace Message { /** * The tools to add this file to. */ - tools?: Array; + tools?: Array; + } + + export namespace Attachment { + export interface AssistantToolsFileSearchTypeOnly { + /** + * The type of tool being defined: `file_search` + */ + type: 'file_search'; + } } /** @@ -637,7 +646,16 @@ export namespace MessageCreateParams { /** * The tools to add this file to. 
*/ - tools?: Array; + tools?: Array; + } + + export namespace Attachment { + export interface FileSearch { + /** + * The type of tool being defined: `file_search` + */ + type: 'file_search'; + } } } diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index ed5a5ff68..0cc7e35d1 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -404,7 +404,7 @@ export interface Run { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) * during tool use. */ parallel_tool_calls: boolean; @@ -694,7 +694,7 @@ export interface RunCreateParamsBase { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; @@ -806,7 +806,16 @@ export namespace RunCreateParams { /** * The tools to add this file to. */ - tools?: Array; + tools?: Array; + } + + export namespace Attachment { + export interface FileSearch { + /** + * The type of tool being defined: `file_search` + */ + type: 'file_search'; + } } } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 6978e6eb5..441bbe41c 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -323,7 +323,16 @@ export namespace ThreadCreateParams { /** * The tools to add this file to. 
*/ - tools?: Array; + tools?: Array; + } + + export namespace Attachment { + export interface FileSearch { + /** + * The type of tool being defined: `file_search` + */ + type: 'file_search'; + } } } @@ -562,7 +571,7 @@ export interface ThreadCreateAndRunParamsBase { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; @@ -717,7 +726,16 @@ export namespace ThreadCreateAndRunParams { /** * The tools to add this file to. */ - tools?: Array; + tools?: Array; + } + + export namespace Attachment { + export interface FileSearch { + /** + * The type of tool being defined: `file_search` + */ + type: 'file_search'; + } } } diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index eeaab3d70..b7c301f4e 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -759,7 +759,7 @@ export interface ChatCompletionCreateParamsBase { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) * during tool use. 
*/ parallel_tool_calls?: boolean; From c3515a5ace0054f3aec4b03d7dcaa122384369f9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 7 Jun 2024 19:40:58 +0000 Subject: [PATCH 140/533] release: 4.49.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 999cfc01c..74af4d46e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.49.0" + ".": "4.49.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8853010a9..d0f49f420 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.49.1 (2024-06-07) + +Full Changelog: [v4.49.0...v4.49.1](https://github.com/openai/openai-node/compare/v4.49.0...v4.49.1) + +### Bug Fixes + +* remove erroneous thread create argument ([#889](https://github.com/openai/openai-node/issues/889)) ([a9f898e](https://github.com/openai/openai-node/commit/a9f898ee109a0b35a672e41c6497f3a75eff7734)) + ## 4.49.0 (2024-06-06) Full Changelog: [v4.48.3...v4.49.0](https://github.com/openai/openai-node/compare/v4.48.3...v4.49.0) diff --git a/README.md b/README.md index acd36e4d1..61c0b2697 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.49.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.49.1/mod.ts'; ``` diff --git a/package.json b/package.json index 5a116918d..f75c903c4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.49.0", + "version": "4.49.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 
460fead2b..73f54338d 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.49.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.49.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index bb12aad49..7df786cb2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.49.0'; // x-release-please-version +export const VERSION = '4.49.1'; // x-release-please-version From ab47709db24a1d8006a695746be8a180ffbf8c5d Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Mon, 10 Jun 2024 16:20:03 -0400 Subject: [PATCH 141/533] feat: support `application/octet-stream` request bodies (#892) --- src/core.ts | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/src/core.ts b/src/core.ts index 39fe0f97f..2d91751d7 100644 --- a/src/core.ts +++ b/src/core.ts @@ -19,7 +19,7 @@ import { type HeadersInit, } from './_shims/index'; export { type Response }; -import { isMultipartBody } from './uploads'; +import { BlobLike, isBlobLike, isMultipartBody } from './uploads'; export { maybeMultipartFormRequestOptions, multipartFormRequestOptions, @@ -249,7 +249,17 @@ export abstract class APIClient { path: string, opts?: PromiseOrValue>, ): APIPromise { - return this.request(Promise.resolve(opts).then((opts) => ({ method, path, ...opts }))); + return this.request( + Promise.resolve(opts).then(async (opts) => { + const body = + opts && isBlobLike(opts?.body) ? new DataView(await opts.body.arrayBuffer()) + : opts?.body instanceof DataView ? opts.body + : opts?.body instanceof ArrayBuffer ? new DataView(opts.body) + : opts && ArrayBuffer.isView(opts?.body) ? 
new DataView(opts.body.buffer) + : opts?.body; + return { method, path, ...opts, body }; + }), + ); } getAPIList = AbstractPage>( @@ -271,6 +281,8 @@ export abstract class APIClient { const encoded = encoder.encode(body); return encoded.length.toString(); } + } else if (ArrayBuffer.isView(body)) { + return body.byteLength.toString(); } return null; @@ -280,7 +292,9 @@ export abstract class APIClient { const { method, path, query, headers: headers = {} } = options; const body = - isMultipartBody(options.body) ? options.body.body + ArrayBuffer.isView(options.body) || (options.__binaryRequest && typeof options.body === 'string') ? + options.body + : isMultipartBody(options.body) ? options.body.body : options.body ? JSON.stringify(options.body, null, 2) : null; const contentLength = this.calculateContentLength(body); @@ -735,7 +749,9 @@ export type Headers = Record; export type DefaultQuery = Record; export type KeysEnum = { [P in keyof Required]: true }; -export type RequestOptions | Readable> = { +export type RequestOptions< + Req = unknown | Record | Readable | BlobLike | ArrayBufferView | ArrayBuffer, +> = { method?: HTTPMethod; path?: string; query?: Req | undefined; @@ -749,6 +765,7 @@ export type RequestOptions | Readable> = signal?: AbortSignal | undefined | null; idempotencyKey?: string; + __binaryRequest?: boolean | undefined; __binaryResponse?: boolean | undefined; __streamClass?: typeof Stream; }; @@ -770,6 +787,7 @@ const requestOptionsKeys: KeysEnum = { signal: true, idempotencyKey: true, + __binaryRequest: true, __binaryResponse: true, __streamClass: true, }; @@ -783,10 +801,11 @@ export const isRequestOptions = (obj: unknown): obj is RequestOptions => { ); }; -export type FinalRequestOptions | Readable> = RequestOptions & { - method: HTTPMethod; - path: string; -}; +export type FinalRequestOptions | Readable | DataView> = + RequestOptions & { + method: HTTPMethod; + path: string; + }; declare const Deno: any; declare const EdgeRuntime: any; From 
2f79293d4ee63253a7826f2b69fb36f545fc6ce4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 20:20:29 +0000 Subject: [PATCH 142/533] release: 4.50.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 74af4d46e..e99be4da9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.49.1" + ".": "4.50.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d0f49f420..47cafc9b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.50.0 (2024-06-10) + +Full Changelog: [v4.49.1...v4.50.0](https://github.com/openai/openai-node/compare/v4.49.1...v4.50.0) + +### Features + +* support `application/octet-stream` request bodies ([#892](https://github.com/openai/openai-node/issues/892)) ([51661c8](https://github.com/openai/openai-node/commit/51661c8068d4990df6916becb6bb85353b54ef4d)) + ## 4.49.1 (2024-06-07) Full Changelog: [v4.49.0...v4.49.1](https://github.com/openai/openai-node/compare/v4.49.0...v4.49.1) diff --git a/README.md b/README.md index 61c0b2697..c1c910645 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.49.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.50.0/mod.ts'; ``` diff --git a/package.json b/package.json index f75c903c4..0cc852583 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.49.1", + "version": "4.50.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 73f54338d..6d6109f7b 100755 --- 
a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.49.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.50.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 7df786cb2..44f8e47e3 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.49.1'; // x-release-please-version +export const VERSION = '4.50.0'; // x-release-please-version From 2044b6445654d986d9cc4e9fef772104c009d2a4 Mon Sep 17 00:00:00 2001 From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com> Date: Wed, 12 Jun 2024 14:44:44 -0400 Subject: [PATCH 143/533] feat(api): updates (#894) --- .stats.yml | 2 +- src/resources/beta/threads/messages.ts | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.stats.yml b/.stats.yml index a6c08f499..c5ada3b5d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c085faf70d6ff059fbe11b7b6b98123a612524cb9b8a6f649c99526e5b0b1bdb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5cb1810135c35c5024698f3365626471a04796e26e393aefe1aa0ba3c0891919.yml diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 4bbdc7426..07c5a573c 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -129,11 +129,6 @@ export namespace FileCitationAnnotation { * The ID of the specific File the citation is from. */ file_id: string; - - /** - * The specific quote in the file. 
- */ - quote: string; } } From 70d2bb37fae06e685f73a98db3eca6e540c8ed01 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 12 Jun 2024 18:45:08 +0000 Subject: [PATCH 144/533] release: 4.51.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e99be4da9..58c53eaeb 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.50.0" + ".": "4.51.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 47cafc9b0..8c2a3b446 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.51.0 (2024-06-12) + +Full Changelog: [v4.50.0...v4.51.0](https://github.com/openai/openai-node/compare/v4.50.0...v4.51.0) + +### Features + +* **api:** updates ([#894](https://github.com/openai/openai-node/issues/894)) ([b58f5a1](https://github.com/openai/openai-node/commit/b58f5a1344f631dac0fb8ecfa4fbae49af070189)) + ## 4.50.0 (2024-06-10) Full Changelog: [v4.49.1...v4.50.0](https://github.com/openai/openai-node/compare/v4.49.1...v4.50.0) diff --git a/README.md b/README.md index c1c910645..6dcc2abc4 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.50.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.51.0/mod.ts'; ``` diff --git a/package.json b/package.json index 0cc852583..f5401bb8c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.50.0", + "version": "4.51.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 6d6109f7b..2d0cbecc2 100755 --- 
a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.50.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.51.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 44f8e47e3..9daf60a23 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.50.0'; // x-release-please-version +export const VERSION = '4.51.0'; // x-release-please-version From bd5b4ab55ee0fd8baee345413ec79405ece29d09 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 19:53:04 +0000 Subject: [PATCH 145/533] feat(api): add service tier argument for chat completions (#900) --- .stats.yml | 2 +- src/resources/chat/completions.ts | 25 ++++++++++++++++++++ tests/api-resources/chat/completions.test.ts | 1 + 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index c5ada3b5d..aa7e8427b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5cb1810135c35c5024698f3365626471a04796e26e393aefe1aa0ba3c0891919.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8fe357c6b5a425d810d731e4102a052d8e38c5e2d66950e6de1025415160bf88.yml diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index b7c301f4e..664def2b2 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -66,6 +66,12 @@ export interface ChatCompletion { */ object: 'chat.completion'; + /** + * The service tier used for processing the request. This field is only included if + * the `service_tier` parameter is specified in the request. 
+ */ + service_tier?: 'scale' | 'default' | null; + /** * This fingerprint represents the backend configuration that the model runs with. * @@ -205,6 +211,12 @@ export interface ChatCompletionChunk { */ object: 'chat.completion.chunk'; + /** + * The service tier used for processing the request. This field is only included if + * the `service_tier` parameter is specified in the request. + */ + service_tier?: 'scale' | 'default' | null; + /** * This fingerprint represents the backend configuration that the model runs with. * Can be used in conjunction with the `seed` request parameter to understand when @@ -800,6 +812,19 @@ export interface ChatCompletionCreateParamsBase { */ seed?: number | null; + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', the system will utilize scale tier credits until they are + * exhausted. + * - If set to 'default', the request will be processed in the shared cluster. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. + */ + service_tier?: 'auto' | 'default' | null; + /** * Up to 4 sequences where the API will stop generating further tokens. 
*/ diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 9404e9e18..c63466f99 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -38,6 +38,7 @@ describe('resource completions', () => { presence_penalty: -2, response_format: { type: 'json_object' }, seed: -9223372036854776000, + service_tier: 'auto', stop: 'string', stream: false, stream_options: { include_usage: true }, From 22cf0362c4a72873433d10452248934672bd65ac Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 19:53:29 +0000 Subject: [PATCH 146/533] release: 4.52.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 58c53eaeb..5ed75d11b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.51.0" + ".": "4.52.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c2a3b446..f9c5d60a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.52.0 (2024-06-18) + +Full Changelog: [v4.51.0...v4.52.0](https://github.com/openai/openai-node/compare/v4.51.0...v4.52.0) + +### Features + +* **api:** add service tier argument for chat completions ([#900](https://github.com/openai/openai-node/issues/900)) ([91e6651](https://github.com/openai/openai-node/commit/91e66514037a8d6f9c39d3c96cd5769885925a4b)) + ## 4.51.0 (2024-06-12) Full Changelog: [v4.50.0...v4.51.0](https://github.com/openai/openai-node/compare/v4.50.0...v4.51.0) diff --git a/README.md b/README.md index 6dcc2abc4..ebbb38293 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 
'https://deno.land/x/openai@v4.51.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.52.0/mod.ts'; ``` diff --git a/package.json b/package.json index f5401bb8c..c93d881cc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.51.0", + "version": "4.52.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 2d0cbecc2..c842fa5bc 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.51.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.52.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 9daf60a23..0f31a0778 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.51.0'; // x-release-please-version +export const VERSION = '4.52.0'; // x-release-please-version From fceba0d44edb407c2d28857ff303ba587c01c030 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 21 Jun 2024 09:28:46 +0000 Subject: [PATCH 147/533] chore(internal): re-order some imports (#904) --- src/resources/audio/speech.ts | 2 +- src/resources/files.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index bcfbc80cc..0c6a97ab5 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -2,8 +2,8 @@ import * as Core from '../../core'; import { APIResource } from '../../resource'; -import { type Response } from '../../_shims/index'; import * as SpeechAPI from './speech'; +import { type Response } from '../../_shims/index'; export class Speech extends APIResource { /** diff --git a/src/resources/files.ts b/src/resources/files.ts index 
d86dd9972..30f559890 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -3,12 +3,12 @@ import * as Core from '../core'; import { APIResource } from '../resource'; import { isRequestOptions } from '../core'; -import { type Response } from '../_shims/index'; import { sleep } from '../core'; import { APIConnectionTimeoutError } from '../error'; import * as FilesAPI from './files'; import { type Uploadable, multipartFormRequestOptions } from '../core'; import { Page } from '../pagination'; +import { type Response } from '../_shims/index'; export class Files extends APIResource { /** From 1c2245d5bd196b0f09518acf61ce1f0326e7fe66 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 21 Jun 2024 10:16:18 +0000 Subject: [PATCH 148/533] chore(doc): clarify service tier default value (#908) --- .stats.yml | 2 +- src/resources/chat/completions.ts | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index aa7e8427b..04682ea0a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8fe357c6b5a425d810d731e4102a052d8e38c5e2d66950e6de1025415160bf88.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3a69e1cc9e1efda3fb82d0fb35961749f886a87594dae9d8d2aa5c60f157f5d2.yml diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 664def2b2..b49cf1069 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -818,7 +818,8 @@ export interface ChatCompletionCreateParamsBase { * * - If set to 'auto', the system will utilize scale tier credits until they are * exhausted. - * - If set to 'default', the request will be processed in the shared cluster. 
+ * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. * * When this parameter is set, the response body will include the `service_tier` * utilized. From 9edcdfd39768e2138ec374cfd3fa3ce8e732ac73 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 25 Jun 2024 15:59:55 +0000 Subject: [PATCH 149/533] chore(internal): minor reformatting (#911) --- src/index.ts | 9 ++++++--- src/resources/audio/speech.ts | 2 +- src/resources/audio/transcriptions.ts | 7 +++---- src/resources/audio/translations.ts | 7 +++---- src/resources/batches.ts | 2 +- src/resources/beta/assistants.ts | 2 +- src/resources/beta/threads/messages.ts | 2 +- src/resources/beta/threads/runs/runs.ts | 4 ++-- src/resources/beta/threads/runs/steps.ts | 2 +- src/resources/beta/threads/threads.ts | 4 ++-- src/resources/beta/vector-stores/file-batches.ts | 2 +- src/resources/beta/vector-stores/files.ts | 5 ++--- src/resources/beta/vector-stores/vector-stores.ts | 2 +- src/resources/chat/completions.ts | 4 ++-- src/resources/completions.ts | 4 ++-- src/resources/embeddings.ts | 2 +- src/resources/files.ts | 7 +++---- src/resources/fine-tuning/jobs/checkpoints.ts | 2 +- src/resources/fine-tuning/jobs/jobs.ts | 2 +- src/resources/images.ts | 13 ++++++------- src/resources/models.ts | 2 +- src/resources/moderations.ts | 2 +- tests/stringifyQuery.test.ts | 9 ++++++--- 23 files changed, 49 insertions(+), 48 deletions(-) diff --git a/src/index.ts b/src/index.ts index fdafabf3d..ce455108e 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from './core'; import * as Errors from './error'; -import { type Agent, type RequestInit } from './_shims/index'; import * as Uploads from './uploads'; +import { type Agent, type RequestInit } from './_shims/index'; +import * as Core from './core'; import * as Pagination from './pagination'; import * as API from './resources/index'; @@ -86,7 +86,9 @@ export interface ClientOptions { dangerouslyAllowBrowser?: boolean; } -/** API Client for interfacing with the OpenAI API. */ +/** + * API Client for interfacing with the OpenAI API. + */ export class OpenAI extends Core.APIClient { apiKey: string; organization: string | null; @@ -143,6 +145,7 @@ export class OpenAI extends Core.APIClient { maxRetries: options.maxRetries, fetch: options.fetch, }); + this._options = options; this.apiKey = apiKey; diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 0c6a97ab5..d0a6e7f31 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../core'; import { APIResource } from '../../resource'; +import * as Core from '../../core'; import * as SpeechAPI from './speech'; import { type Response } from '../../_shims/index'; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index bbffce4ed..0eb4e4b7c 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -1,16 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../core'; import { APIResource } from '../../resource'; +import * as Core from '../../core'; import * as TranscriptionsAPI from './transcriptions'; -import { type Uploadable, multipartFormRequestOptions } from '../../core'; export class Transcriptions extends APIResource { /** * Transcribes audio into the input language. 
*/ create(body: TranscriptionCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/audio/transcriptions', multipartFormRequestOptions({ body, ...options })); + return this._client.post('/audio/transcriptions', Core.multipartFormRequestOptions({ body, ...options })); } } @@ -30,7 +29,7 @@ export interface TranscriptionCreateParams { * The audio file object (not file name) to transcribe, in one of these formats: * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. */ - file: Uploadable; + file: Core.Uploadable; /** * ID of the model to use. Only `whisper-1` (which is powered by our open source diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 890c59d55..48fddc2ee 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -1,16 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../core'; import { APIResource } from '../../resource'; +import * as Core from '../../core'; import * as TranslationsAPI from './translations'; -import { type Uploadable, multipartFormRequestOptions } from '../../core'; export class Translations extends APIResource { /** * Translates audio into English. */ create(body: TranslationCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/audio/translations', multipartFormRequestOptions({ body, ...options })); + return this._client.post('/audio/translations', Core.multipartFormRequestOptions({ body, ...options })); } } @@ -23,7 +22,7 @@ export interface TranslationCreateParams { * The audio file object (not file name) translate, in one of these formats: flac, * mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. */ - file: Uploadable; + file: Core.Uploadable; /** * ID of the model to use. 
Only `whisper-1` (which is powered by our open source diff --git a/src/resources/batches.ts b/src/resources/batches.ts index d23c059dc..738582f9e 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../core'; import { APIResource } from '../resource'; import { isRequestOptions } from '../core'; +import * as Core from '../core'; import * as BatchesAPI from './batches'; import { CursorPage, type CursorPageParams } from '../pagination'; diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index cdea09266..5d326a593 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../core'; import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; import * as AssistantsAPI from './assistants'; import * as Shared from '../shared'; import * as MessagesAPI from './threads/messages'; diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 07c5a573c..db58f45b8 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from '../../../core'; import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; import * as MessagesAPI from './messages'; import * as AssistantsAPI from '../assistants'; import { CursorPage, type CursorPageParams } from '../../../pagination'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 0cc7e35d1..b4ed09cc2 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../../../core'; -import { APIPromise } from '../../../../core'; import { APIResource } from '../../../../resource'; import { isRequestOptions } from '../../../../core'; +import { APIPromise } from '../../../../core'; +import * as Core from '../../../../core'; import { AssistantStream, RunCreateParamsBaseStream } from '../../../../lib/AssistantStream'; import { sleep } from '../../../../core'; import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStream'; diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index 0cbb60ca4..09605d458 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from '../../../../core'; import { APIResource } from '../../../../resource'; import { isRequestOptions } from '../../../../core'; +import * as Core from '../../../../core'; import * as StepsAPI from './steps'; import { CursorPage, type CursorPageParams } from '../../../../pagination'; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 441bbe41c..aded9daf1 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../../core'; -import { APIPromise } from '../../../core'; import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from '../../../lib/AssistantStream'; +import { APIPromise } from '../../../core'; +import * as Core from '../../../core'; import * as ThreadsAPI from './threads'; import * as AssistantsAPI from '../assistants'; import * as MessagesAPI from './messages'; diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 2483e984f..890a92190 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -1,11 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from '../../../core'; import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import { sleep } from '../../../core'; import { Uploadable } from '../../../core'; import { allSettledWithThrow } from '../../../lib/Util'; +import * as Core from '../../../core'; import * as FileBatchesAPI from './file-batches'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index 04a0413be..594c51970 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -1,9 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../../core'; import { APIResource } from '../../../resource'; -import { isRequestOptions } from '../../../core'; -import { sleep, Uploadable } from '../../../core'; +import { sleep, Uploadable, isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; import * as FilesAPI from './files'; import { CursorPage, type CursorPageParams } from '../../../pagination'; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index d2d4c7d39..343f25953 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from '../../../core'; import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; import * as VectorStoresAPI from './vector-stores'; import * as FileBatchesAPI from './file-batches'; import * as FilesAPI from './files'; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index b49cf1069..44eb9520c 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../core'; -import { APIPromise } from '../../core'; import { APIResource } from '../../resource'; +import { APIPromise } from '../../core'; +import * as Core from '../../core'; import * as ChatCompletionsAPI from './completions'; import * as CompletionsAPI from '../completions'; import * as Shared from '../shared'; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 26bf5ca0d..a6b527995 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../core'; -import { APIPromise } from '../core'; import { APIResource } from '../resource'; +import { APIPromise } from '../core'; +import * as Core from '../core'; import * as CompletionsAPI from './completions'; import * as ChatCompletionsAPI from './chat/completions'; import { Stream } from '../streaming'; diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 28c954711..f72b9308a 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from '../core'; import { APIResource } from '../resource'; +import * as Core from '../core'; import * as EmbeddingsAPI from './embeddings'; export class Embeddings extends APIResource { diff --git a/src/resources/files.ts b/src/resources/files.ts index 30f559890..a2d3aaa44 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -1,12 +1,11 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../core'; import { APIResource } from '../resource'; import { isRequestOptions } from '../core'; import { sleep } from '../core'; import { APIConnectionTimeoutError } from '../error'; +import * as Core from '../core'; import * as FilesAPI from './files'; -import { type Uploadable, multipartFormRequestOptions } from '../core'; import { Page } from '../pagination'; import { type Response } from '../_shims/index'; @@ -35,7 +34,7 @@ export class Files extends APIResource { * storage limits. */ create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/files', multipartFormRequestOptions({ body, ...options })); + return this._client.post('/files', Core.multipartFormRequestOptions({ body, ...options })); } /** @@ -188,7 +187,7 @@ export interface FileCreateParams { /** * The File object (not file name) to be uploaded. */ - file: Uploadable; + file: Core.Uploadable; /** * The intended purpose of the uploaded file. diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts index 0e3cdeb79..02896b26d 100644 --- a/src/resources/fine-tuning/jobs/checkpoints.ts +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from '../../../core'; import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; import * as CheckpointsAPI from './checkpoints'; import { CursorPage, type CursorPageParams } from '../../../pagination'; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 12990c6fc..c4aae364a 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -1,8 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../../../core'; import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; import * as JobsAPI from './jobs'; import * as CheckpointsAPI from './checkpoints'; import { CursorPage, type CursorPageParams } from '../../../pagination'; diff --git a/src/resources/images.ts b/src/resources/images.ts index 337909578..24af635b2 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -1,9 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../core'; import { APIResource } from '../resource'; +import * as Core from '../core'; import * as ImagesAPI from './images'; -import { type Uploadable, multipartFormRequestOptions } from '../core'; export class Images extends APIResource { /** @@ -13,14 +12,14 @@ export class Images extends APIResource { body: ImageCreateVariationParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/images/variations', multipartFormRequestOptions({ body, ...options })); + return this._client.post('/images/variations', Core.multipartFormRequestOptions({ body, ...options })); } /** * Creates an edited or extended image given an original image and a prompt. 
*/ edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/images/edits', multipartFormRequestOptions({ body, ...options })); + return this._client.post('/images/edits', Core.multipartFormRequestOptions({ body, ...options })); } /** @@ -64,7 +63,7 @@ export interface ImageCreateVariationParams { * The image to use as the basis for the variation(s). Must be a valid PNG file, * less than 4MB, and square. */ - image: Uploadable; + image: Core.Uploadable; /** * The model to use for image generation. Only `dall-e-2` is supported at this @@ -104,7 +103,7 @@ export interface ImageEditParams { * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask * is not provided, image must have transparency, which will be used as the mask. */ - image: Uploadable; + image: Core.Uploadable; /** * A text description of the desired image(s). The maximum length is 1000 @@ -117,7 +116,7 @@ export interface ImageEditParams { * indicate where `image` should be edited. Must be a valid PNG file, less than * 4MB, and have the same dimensions as `image`. */ - mask?: Uploadable; + mask?: Core.Uploadable; /** * The model to use for image generation. Only `dall-e-2` is supported at this diff --git a/src/resources/models.ts b/src/resources/models.ts index 1d94c6c55..178915747 100644 --- a/src/resources/models.ts +++ b/src/resources/models.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Core from '../core'; import { APIResource } from '../resource'; +import * as Core from '../core'; import * as ModelsAPI from './models'; import { Page } from '../pagination'; diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index c018f65e7..86fbbc6b2 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import * as Core from '../core'; import { APIResource } from '../resource'; +import * as Core from '../core'; import * as ModerationsAPI from './moderations'; export class Moderations extends APIResource { diff --git a/tests/stringifyQuery.test.ts b/tests/stringifyQuery.test.ts index 6db84d3fe..724743f30 100644 --- a/tests/stringifyQuery.test.ts +++ b/tests/stringifyQuery.test.ts @@ -1,8 +1,10 @@ -import { APIClient } from 'openai/core'; +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -const { stringifyQuery } = APIClient.prototype as any; +import { OpenAI } from 'openai'; -describe('APIClient.stringifyQuery', () => { +const { stringifyQuery } = OpenAI.prototype as any; + +describe(stringifyQuery, () => { for (const [input, expected] of [ [{ a: '1', b: 2, c: true }, 'a=1&b=2&c=true'], [{ a: null, b: false, c: undefined }, 'a=&b=false'], @@ -18,6 +20,7 @@ describe('APIClient.stringifyQuery', () => { expect(stringifyQuery(input)).toEqual(expected); }); } + for (const value of [[], {}, new Date()]) { it(`${JSON.stringify(value)} -> `, () => { expect(() => stringifyQuery({ value })).toThrow(`Cannot stringify type ${typeof value}`); From 31554691829c05871ef20b44a6d4588588a61532 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 25 Jun 2024 16:00:16 +0000 Subject: [PATCH 150/533] release: 4.52.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 10 ++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5ed75d11b..da7db6479 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.52.0" + ".": "4.52.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f9c5d60a0..2288edfd6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog 
+## 4.52.1 (2024-06-25) + +Full Changelog: [v4.52.0...v4.52.1](https://github.com/openai/openai-node/compare/v4.52.0...v4.52.1) + +### Chores + +* **doc:** clarify service tier default value ([#908](https://github.com/openai/openai-node/issues/908)) ([e4c8100](https://github.com/openai/openai-node/commit/e4c8100c7732bdc336b52a48d09945782c0fa2a3)) +* **internal:** minor reformatting ([#911](https://github.com/openai/openai-node/issues/911)) ([78c9377](https://github.com/openai/openai-node/commit/78c9377fcd563645081629a89f3fda2c1ff4e175)) +* **internal:** re-order some imports ([#904](https://github.com/openai/openai-node/issues/904)) ([dbd5c40](https://github.com/openai/openai-node/commit/dbd5c4053ba2f255dfc56676ced5b30381843c75)) + ## 4.52.0 (2024-06-18) Full Changelog: [v4.51.0...v4.52.0](https://github.com/openai/openai-node/compare/v4.51.0...v4.52.0) diff --git a/README.md b/README.md index ebbb38293..04dc048a5 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.52.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.52.1/mod.ts'; ``` diff --git a/package.json b/package.json index c93d881cc..13c0b508b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.52.0", + "version": "4.52.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index c842fa5bc..ad438d13c 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.52.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.52.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 0f31a0778..77b6f5f83 100644 
--- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.52.0'; // x-release-please-version +export const VERSION = '4.52.1'; // x-release-please-version From 18a97570c2b042f2d82bef34246752e1de025414 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:11:17 +0000 Subject: [PATCH 151/533] chore: gitignore test server logs (#914) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 733d72ecf..0af7568e5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.prism.log node_modules yarn-error.log codegen.log From 01c504329b853953840c9419c8e5c5adb1fae081 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:11:39 +0000 Subject: [PATCH 152/533] release: 4.52.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index da7db6479..e6023a2ff 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.52.1" + ".": "4.52.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2288edfd6..199217269 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.52.2 (2024-06-28) + +Full Changelog: [v4.52.1...v4.52.2](https://github.com/openai/openai-node/compare/v4.52.1...v4.52.2) + +### Chores + +* gitignore test server logs ([#914](https://github.com/openai/openai-node/issues/914)) ([6316720](https://github.com/openai/openai-node/commit/6316720c3fdd0422965ae3890275062bc0fe3c2b)) + ## 4.52.1 (2024-06-25) Full Changelog: [v4.52.0...v4.52.1](https://github.com/openai/openai-node/compare/v4.52.0...v4.52.1) diff --git a/README.md b/README.md index 04dc048a5..89319e1f2 100644 
--- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.52.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.52.2/mod.ts'; ``` diff --git a/package.json b/package.json index 13c0b508b..a895a7203 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.52.1", + "version": "4.52.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index ad438d13c..f81a4b747 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.52.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.52.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 77b6f5f83..bf5e2cb57 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.52.1'; // x-release-please-version +export const VERSION = '4.52.2'; // x-release-please-version From 7da05e3ea5f5a29faa5d564f152a44c9d94e00a1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 19:50:05 +0000 Subject: [PATCH 153/533] chore: minor change to tests (#916) --- .stats.yml | 2 +- tests/api-resources/chat/completions.test.ts | 2 +- tests/api-resources/completions.test.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 04682ea0a..57f5afaff 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3a69e1cc9e1efda3fb82d0fb35961749f886a87594dae9d8d2aa5c60f157f5d2.yml +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-27d8d6da893c1cdd53b491ec05153df22b1e113965f253a1d6eb8d75b628173f.yml diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index c63466f99..56c882d17 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -37,7 +37,7 @@ describe('resource completions', () => { parallel_tool_calls: true, presence_penalty: -2, response_format: { type: 'json_object' }, - seed: -9223372036854776000, + seed: -9007199254740991, service_tier: 'auto', stop: 'string', stream: false, diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 3f6792447..f78f7a593 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -32,7 +32,7 @@ describe('resource completions', () => { max_tokens: 16, n: 1, presence_penalty: -2, - seed: -9223372036854776000, + seed: -9007199254740991, stop: '\n', stream: false, stream_options: { include_usage: true }, From d094e838b1d3877b518e861555cb19c21c3b85cd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 19:50:31 +0000 Subject: [PATCH 154/533] release: 4.52.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e6023a2ff..4fe1eeec4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.52.2" + ".": "4.52.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 199217269..d56262e47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.52.3 (2024-07-02) + +Full Changelog: 
[v4.52.2...v4.52.3](https://github.com/openai/openai-node/compare/v4.52.2...v4.52.3) + +### Chores + +* minor change to tests ([#916](https://github.com/openai/openai-node/issues/916)) ([b8a33e3](https://github.com/openai/openai-node/commit/b8a33e31697b52d733f28d9380e0c02a2d179474)) + ## 4.52.2 (2024-06-28) Full Changelog: [v4.52.1...v4.52.2](https://github.com/openai/openai-node/compare/v4.52.1...v4.52.2) diff --git a/README.md b/README.md index 89319e1f2..fd707baf1 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.52.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.52.3/mod.ts'; ``` diff --git a/package.json b/package.json index a895a7203..4f76ed2b1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.52.2", + "version": "4.52.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index f81a4b747..208849530 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.52.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.52.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index bf5e2cb57..9df91ee45 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.52.2'; // x-release-please-version +export const VERSION = '4.52.3'; // x-release-please-version From 8847d22b3014c108f711d48264847d05904b94de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20L=C3=A9vy?= Date: Mon, 8 Jul 2024 18:53:00 +0200 Subject: [PATCH 155/533] refactor(examples): removedduplicated 'messageDelta' streaming event. 
(#909) --- examples/assistant-stream.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/assistant-stream.ts b/examples/assistant-stream.ts index 6c71bf23b..d1d5b040f 100755 --- a/examples/assistant-stream.ts +++ b/examples/assistant-stream.ts @@ -39,7 +39,6 @@ async function main() { .on('textDelta', (delta, snapshot) => console.log(snapshot)) .on('messageDelta', (delta, snapshot) => console.log(snapshot)) .on('run', (run) => console.log(run)) - .on('messageDelta', (delta, snapshot) => console.log(snapshot)) .on('connect', () => console.log()); const result = await run.finalRun(); console.log('Run Result' + result); From 782a2d9f900d930d159fdd4e0b149ab4df57db8c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 16:53:18 +0000 Subject: [PATCH 156/533] release: 4.52.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4fe1eeec4..d998f5422 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.52.3" + ".": "4.52.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d56262e47..592724a41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.52.4 (2024-07-08) + +Full Changelog: [v4.52.3...v4.52.4](https://github.com/openai/openai-node/compare/v4.52.3...v4.52.4) + +### Refactors + +* **examples:** removedduplicated 'messageDelta' streaming event. 
([#909](https://github.com/openai/openai-node/issues/909)) ([7b0b3d2](https://github.com/openai/openai-node/commit/7b0b3d2e228532fca19f49390a2831a1abac72a4)) + ## 4.52.3 (2024-07-02) Full Changelog: [v4.52.2...v4.52.3](https://github.com/openai/openai-node/compare/v4.52.2...v4.52.3) diff --git a/README.md b/README.md index fd707baf1..b2f6d0ce4 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.52.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.52.4/mod.ts'; ``` diff --git a/package.json b/package.json index 4f76ed2b1..c104fef39 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.52.3", + "version": "4.52.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 208849530..577844cbe 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.52.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.52.4/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 9df91ee45..d3f41b987 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.52.3'; // x-release-please-version +export const VERSION = '4.52.4'; // x-release-please-version From 4c804715318cc4cbb0de071426d5b93d85f38539 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 10 Jul 2024 08:41:18 +0100 Subject: [PATCH 157/533] fix(vectorStores): correctly handle missing `files` in `uploadAndPoll()` (#926) --- src/resources/beta/vector-stores/file-batches.ts | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git 
a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 890a92190..e4a5c46fe 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -155,19 +155,22 @@ export class FileBatches extends APIResource { { files, fileIds = [] }: { files: Uploadable[]; fileIds?: string[] }, options?: Core.RequestOptions & { pollIntervalMs?: number; maxConcurrency?: number }, ): Promise { - if (files === null || files.length == 0) { - throw new Error('No files provided to process.'); + if (files == null || files.length == 0) { + throw new Error( + `No \`files\` provided to process. If you've already uploaded files you should use \`.createAndPoll()\` instead`, + ); } const configuredConcurrency = options?.maxConcurrency ?? 5; - //We cap the number of workers at the number of files (so we don't start any unnecessary workers) + + // We cap the number of workers at the number of files (so we don't start any unnecessary workers) const concurrencyLimit = Math.min(configuredConcurrency, files.length); const client = this._client; const fileIterator = files.values(); const allFileIds: string[] = [...fileIds]; - //This code is based on this design. The libraries don't accommodate our environment limits. + // This code is based on this design. The libraries don't accommodate our environment limits. // https://stackoverflow.com/questions/40639432/what-is-the-best-way-to-limit-concurrency-when-using-es6s-promise-all async function processFiles(iterator: IterableIterator) { for (let item of iterator) { @@ -176,10 +179,10 @@ export class FileBatches extends APIResource { } } - //Start workers to process results + // Start workers to process results const workers = Array(concurrencyLimit).fill(fileIterator).map(processFiles); - //Wait for all processing to complete. + // Wait for all processing to complete. 
await allSettledWithThrow(workers); return await this.createAndPoll(vectorStoreId, { From cf7da2ba056b1d6cc16dd41e0b414bc915c3a4f9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 07:41:38 +0000 Subject: [PATCH 158/533] release: 4.52.5 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d998f5422..8fe5ee19c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.52.4" + ".": "4.52.5" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 592724a41..cf1796bd6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.52.5 (2024-07-10) + +Full Changelog: [v4.52.4...v4.52.5](https://github.com/openai/openai-node/compare/v4.52.4...v4.52.5) + +### Bug Fixes + +* **vectorStores:** correctly handle missing `files` in `uploadAndPoll()` ([#926](https://github.com/openai/openai-node/issues/926)) ([945fca6](https://github.com/openai/openai-node/commit/945fca646b02b52bbc9163cb51f5d87e7db8afd6)) + ## 4.52.4 (2024-07-08) Full Changelog: [v4.52.3...v4.52.4](https://github.com/openai/openai-node/compare/v4.52.3...v4.52.4) diff --git a/README.md b/README.md index b2f6d0ce4..1e8a544ee 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.52.4/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.52.5/mod.ts'; ``` diff --git a/package.json b/package.json index c104fef39..d8c6dc739 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.52.4", + "version": "4.52.5", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": 
"dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 577844cbe..bd1e495d3 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.52.4/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.52.5/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index d3f41b987..b1bb67f20 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.52.4'; // x-release-please-version +export const VERSION = '4.52.5'; // x-release-please-version From 2a7694d66ee7e31810b761ec346a52a6f9f7a370 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 10:22:54 +0000 Subject: [PATCH 159/533] chore(ci): also run workflows for PRs targeting `next` (#931) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a55376f66..3be379044 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,6 +6,7 @@ on: pull_request: branches: - master + - next jobs: lint: From fe84709cc432ac2ca43e180bd9d311bbc09a1e78 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 10:23:25 +0000 Subject: [PATCH 160/533] release: 4.52.6 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8fe5ee19c..4e9f7a5f0 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.52.5" + ".": "4.52.6" } diff --git a/CHANGELOG.md b/CHANGELOG.md 
index cf1796bd6..387555c91 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.52.6 (2024-07-11) + +Full Changelog: [v4.52.5...v4.52.6](https://github.com/openai/openai-node/compare/v4.52.5...v4.52.6) + +### Chores + +* **ci:** also run workflows for PRs targeting `next` ([#931](https://github.com/openai/openai-node/issues/931)) ([e3f979a](https://github.com/openai/openai-node/commit/e3f979ae94b2252b9552d1e03de5b92d398a3e28)) + ## 4.52.5 (2024-07-10) Full Changelog: [v4.52.4...v4.52.5](https://github.com/openai/openai-node/compare/v4.52.4...v4.52.5) diff --git a/README.md b/README.md index 1e8a544ee..db66d4303 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.52.5/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.52.6/mod.ts'; ``` diff --git a/package.json b/package.json index d8c6dc739..abbaaaf7c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.52.5", + "version": "4.52.6", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index bd1e495d3..7e8dbdc6e 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.52.5/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.52.6/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index b1bb67f20..8a81ba44e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.52.5'; // x-release-please-version +export const VERSION = '4.52.6'; // x-release-please-version From acc78034442e198d6181d772bfb5b21b14ba9626 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 14:28:50 +0000 Subject: [PATCH 161/533] docs(examples): update example values (#933) --- tests/api-resources/audio/speech.test.ts | 2 +- .../audio/transcriptions.test.ts | 4 +- .../api-resources/audio/translations.test.ts | 4 +- tests/api-resources/batches.test.ts | 14 ++--- tests/api-resources/beta/assistants.test.ts | 22 ++++---- .../beta/threads/messages.test.ts | 28 +++++----- .../beta/threads/runs/runs.test.ts | 56 +++++++++---------- .../beta/threads/runs/steps.test.ts | 14 ++--- .../beta/threads/threads.test.ts | 52 ++++++++--------- .../beta/vector-stores/file-batches.test.ts | 16 +++--- .../beta/vector-stores/files.test.ts | 16 +++--- .../beta/vector-stores/vector-stores.test.ts | 12 ++-- tests/api-resources/chat/completions.test.ts | 21 +++++-- tests/api-resources/files.test.ts | 16 +++--- .../fine-tuning/jobs/checkpoints.test.ts | 2 +- .../fine-tuning/jobs/jobs.test.ts | 16 +++--- 16 files changed, 153 insertions(+), 142 deletions(-) diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index 18302ce9a..7509c19ca 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -11,7 +11,7 @@ describe('resource speech', () => { // binary tests are currently broken test.skip('create: required and optional params', async () => { const response = await openai.audio.speech.create({ - input: 'string', + input: 'input', model: 'string', voice: 'alloy', response_format: 'mp3', diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index 3fc4ca22b..938ddd2b3 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -27,8 +27,8 @@ describe('resource transcriptions', () => { const response = await openai.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 
'README.md'), model: 'whisper-1', - language: 'string', - prompt: 'string', + language: 'language', + prompt: 'prompt', response_format: 'json', temperature: 0, timestamp_granularities: ['word', 'segment'], diff --git a/tests/api-resources/audio/translations.test.ts b/tests/api-resources/audio/translations.test.ts index 0853bedfb..3f05bc90f 100644 --- a/tests/api-resources/audio/translations.test.ts +++ b/tests/api-resources/audio/translations.test.ts @@ -27,8 +27,8 @@ describe('resource translations', () => { const response = await openai.audio.translations.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'whisper-1', - prompt: 'string', - response_format: 'string', + prompt: 'prompt', + response_format: 'response_format', temperature: 0, }); }); diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts index 2cd845de6..2861298a8 100644 --- a/tests/api-resources/batches.test.ts +++ b/tests/api-resources/batches.test.ts @@ -13,7 +13,7 @@ describe('resource batches', () => { const responsePromise = openai.batches.create({ completion_window: '24h', endpoint: '/v1/chat/completions', - input_file_id: 'string', + input_file_id: 'input_file_id', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -28,13 +28,13 @@ describe('resource batches', () => { const response = await openai.batches.create({ completion_window: '24h', endpoint: '/v1/chat/completions', - input_file_id: 'string', + input_file_id: 'input_file_id', metadata: { foo: 'string' }, }); }); test('retrieve', async () => { - const responsePromise = openai.batches.retrieve('string'); + const responsePromise = openai.batches.retrieve('batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -46,7 +46,7 @@ describe('resource batches', () => { test('retrieve: request options instead of params are 
passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.batches.retrieve('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(openai.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -72,12 +72,12 @@ describe('resource batches', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.batches.list({ after: 'string', limit: 0 }, { path: '/_stainless_unknown_path' }), + openai.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = openai.batches.cancel('string'); + const responsePromise = openai.batches.cancel('batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -89,7 +89,7 @@ describe('resource batches', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.batches.cancel('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(openai.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index 4049f09b3..44ee2921d 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -23,10 +23,10 @@ describe('resource assistants', () => { test('create: required and 
optional params', async () => { const response = await openai.beta.assistants.create({ model: 'gpt-4-turbo', - description: 'string', - instructions: 'string', + description: 'description', + instructions: 'instructions', metadata: {}, - name: 'string', + name: 'name', response_format: 'none', temperature: 1, tool_resources: { @@ -44,7 +44,7 @@ describe('resource assistants', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.assistants.retrieve('string'); + const responsePromise = openai.beta.assistants.retrieve('assistant_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -57,12 +57,12 @@ describe('resource assistants', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.assistants.retrieve('string', { path: '/_stainless_unknown_path' }), + openai.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.assistants.update('string', {}); + const responsePromise = openai.beta.assistants.update('assistant_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -94,14 +94,14 @@ describe('resource assistants', () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( openai.beta.assistants.list( - { after: 'string', before: 'string', limit: 0, order: 'asc' }, + { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const 
responsePromise = openai.beta.assistants.del('string'); + const responsePromise = openai.beta.assistants.del('assistant_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -113,8 +113,8 @@ describe('resource assistants', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.beta.assistants.del('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( - OpenAI.NotFoundError, - ); + await expect( + openai.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index 01268586c..0f2877af1 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -10,7 +10,7 @@ const openai = new OpenAI({ describe('resource messages', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.threads.messages.create('string', { + const responsePromise = openai.beta.threads.messages.create('thread_id', { content: 'string', role: 'user', }); @@ -24,20 +24,20 @@ describe('resource messages', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.threads.messages.create('string', { + const response = await openai.beta.threads.messages.create('thread_id', { content: 'string', role: 'user', attachments: [ { - file_id: 'string', + file_id: 'file_id', tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], }, { - file_id: 'string', + file_id: 'file_id', tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 
'code_interpreter' }], }, { - file_id: 'string', + file_id: 'file_id', tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], }, ], @@ -46,7 +46,7 @@ describe('resource messages', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.threads.messages.retrieve('string', 'string'); + const responsePromise = openai.beta.threads.messages.retrieve('thread_id', 'message_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -59,12 +59,12 @@ describe('resource messages', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.messages.retrieve('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.threads.messages.retrieve('thread_id', 'message_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.threads.messages.update('string', 'string', {}); + const responsePromise = openai.beta.threads.messages.update('thread_id', 'message_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -75,7 +75,7 @@ describe('resource messages', () => { }); test('list', async () => { - const responsePromise = openai.beta.threads.messages.list('string'); + const responsePromise = openai.beta.threads.messages.list('thread_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -88,7 +88,7 @@ describe('resource messages', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request 
options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.messages.list('string', { path: '/_stainless_unknown_path' }), + openai.beta.threads.messages.list('thread_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); @@ -96,15 +96,15 @@ describe('resource messages', () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( openai.beta.threads.messages.list( - 'string', - { after: 'string', before: 'string', limit: 0, order: 'asc', run_id: 'string' }, + 'thread_id', + { after: 'after', before: 'before', limit: 0, order: 'asc', run_id: 'run_id' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = openai.beta.threads.messages.del('string', 'string'); + const responsePromise = openai.beta.threads.messages.del('thread_id', 'message_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -117,7 +117,7 @@ describe('resource messages', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.messages.del('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.threads.messages.del('thread_id', 'message_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 5aba82ff8..b422a9a3f 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -10,7 +10,7 @@ const openai = new OpenAI({ 
describe('resource runs', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.threads.runs.create('string', { assistant_id: 'string' }); + const responsePromise = openai.beta.threads.runs.create('thread_id', { assistant_id: 'assistant_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,16 +21,16 @@ describe('resource runs', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.threads.runs.create('string', { - assistant_id: 'string', - additional_instructions: 'string', + const response = await openai.beta.threads.runs.create('thread_id', { + assistant_id: 'assistant_id', + additional_instructions: 'additional_instructions', additional_messages: [ { role: 'user', content: 'string', attachments: [ { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -38,7 +38,7 @@ describe('resource runs', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -46,7 +46,7 @@ describe('resource runs', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -61,7 +61,7 @@ describe('resource runs', () => { content: 'string', attachments: [ { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -69,7 +69,7 @@ describe('resource runs', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -77,7 +77,7 @@ describe('resource runs', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -92,7 +92,7 @@ describe('resource runs', () => { content: 'string', attachments: [ { 
- file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -100,7 +100,7 @@ describe('resource runs', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -108,7 +108,7 @@ describe('resource runs', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -119,7 +119,7 @@ describe('resource runs', () => { metadata: {}, }, ], - instructions: 'string', + instructions: 'instructions', max_completion_tokens: 256, max_prompt_tokens: 256, metadata: {}, @@ -136,7 +136,7 @@ describe('resource runs', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.threads.runs.retrieve('string', 'string'); + const responsePromise = openai.beta.threads.runs.retrieve('thread_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -149,12 +149,12 @@ describe('resource runs', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.retrieve('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.threads.runs.retrieve('thread_id', 'run_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.threads.runs.update('string', 'string', {}); + const responsePromise = openai.beta.threads.runs.update('thread_id', 'run_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -165,7 +165,7 @@ describe('resource runs', () => { }); test('list', async () => { - const 
responsePromise = openai.beta.threads.runs.list('string'); + const responsePromise = openai.beta.threads.runs.list('thread_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -178,7 +178,7 @@ describe('resource runs', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.list('string', { path: '/_stainless_unknown_path' }), + openai.beta.threads.runs.list('thread_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); @@ -186,15 +186,15 @@ describe('resource runs', () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( openai.beta.threads.runs.list( - 'string', - { after: 'string', before: 'string', limit: 0, order: 'asc' }, + 'thread_id', + { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = openai.beta.threads.runs.cancel('string', 'string'); + const responsePromise = openai.beta.threads.runs.cancel('thread_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -207,12 +207,12 @@ describe('resource runs', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.cancel('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.threads.runs.cancel('thread_id', 'run_id', { path: 
'/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('submitToolOutputs: only required params', async () => { - const responsePromise = openai.beta.threads.runs.submitToolOutputs('string', 'string', { + const responsePromise = openai.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { tool_outputs: [{}, {}, {}], }); const rawResponse = await responsePromise.asResponse(); @@ -225,11 +225,11 @@ describe('resource runs', () => { }); test('submitToolOutputs: required and optional params', async () => { - const response = await openai.beta.threads.runs.submitToolOutputs('string', 'string', { + const response = await openai.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { tool_outputs: [ - { tool_call_id: 'string', output: 'string' }, - { tool_call_id: 'string', output: 'string' }, - { tool_call_id: 'string', output: 'string' }, + { tool_call_id: 'tool_call_id', output: 'output' }, + { tool_call_id: 'tool_call_id', output: 'output' }, + { tool_call_id: 'tool_call_id', output: 'output' }, ], stream: false, }); diff --git a/tests/api-resources/beta/threads/runs/steps.test.ts b/tests/api-resources/beta/threads/runs/steps.test.ts index 76495a1a3..1981d67fd 100644 --- a/tests/api-resources/beta/threads/runs/steps.test.ts +++ b/tests/api-resources/beta/threads/runs/steps.test.ts @@ -10,7 +10,7 @@ const openai = new OpenAI({ describe('resource steps', () => { test('retrieve', async () => { - const responsePromise = openai.beta.threads.runs.steps.retrieve('string', 'string', 'string'); + const responsePromise = openai.beta.threads.runs.steps.retrieve('thread_id', 'run_id', 'step_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,14 +23,14 @@ describe('resource steps', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing 
an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.steps.retrieve('string', 'string', 'string', { + openai.beta.threads.runs.steps.retrieve('thread_id', 'run_id', 'step_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list', async () => { - const responsePromise = openai.beta.threads.runs.steps.list('string', 'string'); + const responsePromise = openai.beta.threads.runs.steps.list('thread_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,7 +43,7 @@ describe('resource steps', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.steps.list('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.threads.runs.steps.list('thread_id', 'run_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); @@ -51,9 +51,9 @@ describe('resource steps', () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( openai.beta.threads.runs.steps.list( - 'string', - 'string', - { after: 'string', before: 'string', limit: 0, order: 'asc' }, + 'thread_id', + 'run_id', + { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 85f89533c..0d2d93a61 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -38,7 +38,7 @@ describe('resource threads', () => { content: 'string', attachments: [ { - file_id: 'string', 
+ file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -46,7 +46,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -54,7 +54,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -69,7 +69,7 @@ describe('resource threads', () => { content: 'string', attachments: [ { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -77,7 +77,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -85,7 +85,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -100,7 +100,7 @@ describe('resource threads', () => { content: 'string', attachments: [ { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -108,7 +108,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -116,7 +116,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -148,7 +148,7 @@ describe('resource threads', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.threads.retrieve('string'); + const responsePromise = openai.beta.threads.retrieve('thread_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -161,12 +161,12 @@ describe('resource threads', () => { test('retrieve: 
request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.retrieve('string', { path: '/_stainless_unknown_path' }), + openai.beta.threads.retrieve('thread_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.threads.update('string', {}); + const responsePromise = openai.beta.threads.update('thread_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -177,7 +177,7 @@ describe('resource threads', () => { }); test('del', async () => { - const responsePromise = openai.beta.threads.del('string'); + const responsePromise = openai.beta.threads.del('thread_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -189,13 +189,13 @@ describe('resource threads', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.beta.threads.del('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(openai.beta.threads.del('thread_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('createAndRun: only required params', async () => { - const responsePromise = openai.beta.threads.createAndRun({ assistant_id: 'string' }); + const responsePromise = openai.beta.threads.createAndRun({ assistant_id: 'assistant_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -207,8 +207,8 @@ describe('resource 
threads', () => { test('createAndRun: required and optional params', async () => { const response = await openai.beta.threads.createAndRun({ - assistant_id: 'string', - instructions: 'string', + assistant_id: 'assistant_id', + instructions: 'instructions', max_completion_tokens: 256, max_prompt_tokens: 256, metadata: {}, @@ -224,7 +224,7 @@ describe('resource threads', () => { content: 'string', attachments: [ { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -232,7 +232,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -240,7 +240,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -255,7 +255,7 @@ describe('resource threads', () => { content: 'string', attachments: [ { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -263,7 +263,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -271,7 +271,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -286,7 +286,7 @@ describe('resource threads', () => { content: 'string', attachments: [ { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -294,7 +294,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, @@ -302,7 +302,7 @@ describe('resource threads', () => { ], }, { - file_id: 'string', + file_id: 'file_id', tools: [ { type: 'code_interpreter' }, { type: 'code_interpreter' }, 
diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/beta/vector-stores/file-batches.test.ts index b8ff697b7..33bfd2ef7 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/beta/vector-stores/file-batches.test.ts @@ -50,7 +50,7 @@ describe('resource fileBatches', () => { }); test('cancel', async () => { - const responsePromise = openai.beta.vectorStores.fileBatches.cancel('string', 'string'); + const responsePromise = openai.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -63,12 +63,14 @@ describe('resource fileBatches', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.fileBatches.cancel('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { + path: '/_stainless_unknown_path', + }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('listFiles', async () => { - const responsePromise = openai.beta.vectorStores.fileBatches.listFiles('string', 'string'); + const responsePromise = openai.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -81,7 +83,7 @@ describe('resource fileBatches', () => { test('listFiles: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.fileBatches.listFiles('string', 'string', { 
+ openai.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); @@ -91,9 +93,9 @@ describe('resource fileBatches', () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( openai.beta.vectorStores.fileBatches.listFiles( - 'string', - 'string', - { after: 'string', before: 'string', filter: 'in_progress', limit: 0, order: 'asc' }, + 'vector_store_id', + 'batch_id', + { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); diff --git a/tests/api-resources/beta/vector-stores/files.test.ts b/tests/api-resources/beta/vector-stores/files.test.ts index 60906dac3..4b21aed30 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/beta/vector-stores/files.test.ts @@ -10,7 +10,7 @@ const openai = new OpenAI({ describe('resource files', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'string' }); + const responsePromise = openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -22,7 +22,7 @@ describe('resource files', () => { test('create: required and optional params', async () => { const response = await openai.beta.vectorStores.files.create('vs_abc123', { - file_id: 'string', + file_id: 'file_id', chunking_strategy: { type: 'auto' }, }); }); @@ -48,7 +48,7 @@ describe('resource files', () => { }); test('list', async () => { - const responsePromise = openai.beta.vectorStores.files.list('string'); + const responsePromise = openai.beta.vectorStores.files.list('vector_store_id'); const rawResponse 
= await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -61,7 +61,7 @@ describe('resource files', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.files.list('string', { path: '/_stainless_unknown_path' }), + openai.beta.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); @@ -69,15 +69,15 @@ describe('resource files', () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( openai.beta.vectorStores.files.list( - 'string', - { after: 'string', before: 'string', filter: 'in_progress', limit: 0, order: 'asc' }, + 'vector_store_id', + { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = openai.beta.vectorStores.files.del('string', 'string'); + const responsePromise = openai.beta.vectorStores.files.del('vector_store_id', 'file_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -90,7 +90,7 @@ describe('resource files', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.files.del('string', 'string', { path: '/_stainless_unknown_path' }), + openai.beta.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); 
diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/beta/vector-stores/vector-stores.test.ts index 445fa9ebf..11dcceef8 100644 --- a/tests/api-resources/beta/vector-stores/vector-stores.test.ts +++ b/tests/api-resources/beta/vector-stores/vector-stores.test.ts @@ -21,7 +21,7 @@ describe('resource vectorStores', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.vectorStores.retrieve('string'); + const responsePromise = openai.beta.vectorStores.retrieve('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -34,12 +34,12 @@ describe('resource vectorStores', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.retrieve('string', { path: '/_stainless_unknown_path' }), + openai.beta.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.vectorStores.update('string', {}); + const responsePromise = openai.beta.vectorStores.update('vector_store_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -71,14 +71,14 @@ describe('resource vectorStores', () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( openai.beta.vectorStores.list( - { after: 'string', before: 'string', limit: 0, order: 'asc' }, + { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const 
responsePromise = openai.beta.vectorStores.del('string'); + const responsePromise = openai.beta.vectorStores.del('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -91,7 +91,7 @@ describe('resource vectorStores', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.del('string', { path: '/_stainless_unknown_path' }), + openai.beta.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 56c882d17..66ef2d023 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -11,7 +11,7 @@ const openai = new OpenAI({ describe('resource completions', () => { test('create: only required params', async () => { const responsePromise = openai.chat.completions.create({ - messages: [{ content: 'string', role: 'system' }], + messages: [{ content: 'content', role: 'system' }], model: 'gpt-4-turbo', }); const rawResponse = await responsePromise.asResponse(); @@ -25,11 +25,11 @@ describe('resource completions', () => { test('create: required and optional params', async () => { const response = await openai.chat.completions.create({ - messages: [{ content: 'string', role: 'system', name: 'string' }], + messages: [{ content: 'content', role: 'system', name: 'name' }], model: 'gpt-4-turbo', frequency_penalty: -2, function_call: 'none', - functions: [{ description: 'string', name: 'string', parameters: { foo: 'bar' } }], + functions: [{ description: 'description', name: 'name', parameters: { foo: 'bar' } }], logit_bias: { foo: 0 }, logprobs: true, max_tokens: 
0, @@ -45,9 +45,18 @@ describe('resource completions', () => { temperature: 1, tool_choice: 'none', tools: [ - { type: 'function', function: { description: 'string', name: 'string', parameters: { foo: 'bar' } } }, - { type: 'function', function: { description: 'string', name: 'string', parameters: { foo: 'bar' } } }, - { type: 'function', function: { description: 'string', name: 'string', parameters: { foo: 'bar' } } }, + { + type: 'function', + function: { description: 'description', name: 'name', parameters: { foo: 'bar' } }, + }, + { + type: 'function', + function: { description: 'description', name: 'name', parameters: { foo: 'bar' } }, + }, + { + type: 'function', + function: { description: 'description', name: 'name', parameters: { foo: 'bar' } }, + }, ], top_logprobs: 0, top_p: 1, diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index 2fda1c947..55eded995 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -31,7 +31,7 @@ describe('resource files', () => { }); test('retrieve', async () => { - const responsePromise = openai.files.retrieve('string'); + const responsePromise = openai.files.retrieve('file_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,7 +43,7 @@ describe('resource files', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.files.retrieve('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(openai.files.retrieve('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -69,12 +69,12 @@ describe('resource files', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options 
are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.files.list({ purpose: 'string' }, { path: '/_stainless_unknown_path' }), + openai.files.list({ purpose: 'purpose' }, { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = openai.files.del('string'); + const responsePromise = openai.files.del('file_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -86,20 +86,20 @@ describe('resource files', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.files.del('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(openai.files.del('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('content: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.files.content('string', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(openai.files.content('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('retrieveContent', async () => { - const responsePromise = openai.files.retrieveContent('string'); + const responsePromise = openai.files.retrieveContent('file_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -112,7 +112,7 @@ describe('resource files', () => { test('retrieveContent: request options instead of params are passed correctly', async () => { // ensure the request options 
are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.files.retrieveContent('string', { path: '/_stainless_unknown_path' }), + openai.files.retrieveContent('file_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts b/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts index 1844d7c87..3a01448e2 100644 --- a/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts +++ b/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts @@ -34,7 +34,7 @@ describe('resource checkpoints', () => { await expect( openai.fineTuning.jobs.checkpoints.list( 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', - { after: 'string', limit: 0 }, + { after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index d2207cd97..c14912c3a 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -33,8 +33,8 @@ describe('resource jobs', () => { type: 'wandb', wandb: { project: 'my-wandb-project', - name: 'string', - entity: 'string', + name: 'name', + entity: 'entity', tags: ['custom-tag', 'custom-tag', 'custom-tag'], }, }, @@ -42,8 +42,8 @@ describe('resource jobs', () => { type: 'wandb', wandb: { project: 'my-wandb-project', - name: 'string', - entity: 'string', + name: 'name', + entity: 'entity', tags: ['custom-tag', 'custom-tag', 'custom-tag'], }, }, @@ -51,8 +51,8 @@ describe('resource jobs', () => { type: 'wandb', wandb: { project: 'my-wandb-project', - name: 'string', - entity: 'string', + name: 'name', + entity: 'entity', tags: ['custom-tag', 'custom-tag', 'custom-tag'], }, }, @@ -102,7 +102,7 @@ describe('resource jobs', () => { test('list: request options and params are passed correctly', async () => { // ensure 
the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.fineTuning.jobs.list({ after: 'string', limit: 0 }, { path: '/_stainless_unknown_path' }), + openai.fineTuning.jobs.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); @@ -147,7 +147,7 @@ describe('resource jobs', () => { await expect( openai.fineTuning.jobs.listEvents( 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', - { after: 'string', limit: 0 }, + { after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); From 5873a017f0f2040ef97040a8df19c5b4dc2a66fd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 14:29:16 +0000 Subject: [PATCH 162/533] release: 4.52.7 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4e9f7a5f0..dc058ce75 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.52.6" + ".": "4.52.7" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 387555c91..7bff5e4eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.52.7 (2024-07-11) + +Full Changelog: [v4.52.6...v4.52.7](https://github.com/openai/openai-node/compare/v4.52.6...v4.52.7) + +### Documentation + +* **examples:** update example values ([#933](https://github.com/openai/openai-node/issues/933)) ([92512ab](https://github.com/openai/openai-node/commit/92512abcd7ab5d7c452dfae007c3a25041062656)) + ## 4.52.6 (2024-07-11) Full Changelog: [v4.52.5...v4.52.6](https://github.com/openai/openai-node/compare/v4.52.5...v4.52.6) diff --git a/README.md b/README.md index db66d4303..6d971d138 100644 
--- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.52.6/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.52.7/mod.ts'; ``` diff --git a/package.json b/package.json index abbaaaf7c..1f00b2180 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.52.6", + "version": "4.52.7", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 7e8dbdc6e..ba47751d8 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.52.6/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.52.7/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 8a81ba44e..b9a220285 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.52.6'; // x-release-please-version +export const VERSION = '4.52.7'; // x-release-please-version From c39ee6d89b7dec5c88cd99af3e72de8c13f78fcd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2024 13:42:54 +0000 Subject: [PATCH 163/533] chore(docs): mention support of web browser runtimes (#938) --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 6d971d138..3ad75fbda 100644 --- a/README.md +++ b/README.md @@ -617,6 +617,18 @@ The following runtimes are supported: - Vercel Edge Runtime. - Jest 28 or greater with the `"node"` environment (`"jsdom"` is not supported at this time). - Nitro v2.6 or greater. +- Web browsers: disabled by default to avoid exposing your secret API credentials. 
Enable browser support by explicitly setting `dangerouslyAllowBrowser` to true'. +
+ More explanation + ### Why is this dangerous? + Enabling the `dangerouslyAllowBrowser` option can be dangerous because it exposes your secret API credentials in the client-side code. Web browsers are inherently less secure than server environments, + any user with access to the browser can potentially inspect, extract, and misuse these credentials. This could lead to unauthorized access using your credentials and potentially compromise sensitive data or functionality. + ### When might this not be dangerous? + In certain scenarios where enabling browser support might not pose significant risks: + - Internal Tools: If the application is used solely within a controlled internal environment where the users are trusted, the risk of credential exposure can be mitigated. + - Public APIs with Limited Scope: If your API has very limited scope and the exposed credentials do not grant access to sensitive data or critical operations, the potential impact of exposure is reduced. + - Development or debugging purpose: Enabling this feature temporarily might be acceptable, provided the credentials are short-lived, aren't also used in production environments, or are frequently rotated. +
Note that React Native is not supported at this time. From 85b651080926e74a91fc6effc696103baaf905a2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 18 Jul 2024 16:35:33 +0000 Subject: [PATCH 164/533] chore(docs): use client instead of package name in Node examples (#941) --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 3ad75fbda..3d303e5b0 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ The full API of this library can be found in [api.md file](api.md) along with ma ```js import OpenAI from 'openai'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted }); @@ -53,7 +53,7 @@ We provide support for streaming responses using Server Sent Events (SSE). ```ts import OpenAI from 'openai'; -const openai = new OpenAI(); +const client = new OpenAI(); async function main() { const stream = await openai.chat.completions.create({ @@ -80,7 +80,7 @@ This library includes TypeScript definitions for all request params and response ```ts import OpenAI from 'openai'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted }); @@ -301,7 +301,7 @@ import fs from 'fs'; import fetch from 'node-fetch'; import OpenAI, { toFile } from 'openai'; -const openai = new OpenAI(); +const client = new OpenAI(); // If you have access to Node `fs` we recommend using `fs.createReadStream()`: await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' }); @@ -399,7 +399,7 @@ You can use the `maxRetries` option to configure or disable this: ```js // Configure the default for all requests: -const openai = new OpenAI({ +const client = new OpenAI({ maxRetries: 0, // default is 2 }); @@ -416,7 +416,7 @@ Requests time out after 10 minutes by default. 
You can configure this with a `ti ```ts // Configure the default for all requests: -const openai = new OpenAI({ +const client = new OpenAI({ timeout: 20 * 1000, // 20 seconds (default is 10 minutes) }); @@ -471,7 +471,7 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi ```ts -const openai = new OpenAI(); +const client = new OpenAI(); const response = await openai.chat.completions .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) @@ -582,7 +582,7 @@ import http from 'http'; import { HttpsProxyAgent } from 'https-proxy-agent'; // Configure the default for all requests: -const openai = new OpenAI({ +const client = new OpenAI({ httpAgent: new HttpsProxyAgent(process.env.PROXY_URL), }); From a56fdb8e656fa1c306f1bb7fda595b37c5e9b3ed Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 19 Jul 2024 14:54:27 +0000 Subject: [PATCH 165/533] feat(api): add new gpt-4o-mini models (#942) --- .stats.yml | 2 +- src/resources/beta/assistants.ts | 2 ++ src/resources/beta/threads/runs/runs.ts | 2 ++ src/resources/beta/threads/threads.ts | 2 ++ src/resources/chat/chat.ts | 2 ++ 5 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 57f5afaff..27e2ce5ed 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-27d8d6da893c1cdd53b491ec05153df22b1e113965f253a1d6eb8d75b628173f.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-518ca6c60061d3e8bc0971facf40d752f2aea62e3522cc168ad29a1f29cab3dd.yml diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 5d326a593..abacfd06e 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1057,6 +1057,8 @@ export interface AssistantCreateParams { | (string & {}) | 
'gpt-4o' | 'gpt-4o-2024-05-13' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index b4ed09cc2..24b6ce4a2 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -672,6 +672,8 @@ export interface RunCreateParamsBase { | (string & {}) | 'gpt-4o' | 'gpt-4o-2024-05-13' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index aded9daf1..04ce7b57d 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -549,6 +549,8 @@ export interface ThreadCreateAndRunParamsBase { | (string & {}) | 'gpt-4o' | 'gpt-4o-2024-05-13' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index da4e90d42..74cda326e 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -11,6 +11,8 @@ export class Chat extends APIResource { export type ChatModel = | 'gpt-4o' | 'gpt-4o-2024-05-13' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' From c770e34b4c01321e5459b30d72d8615e6755798b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 11:10:38 +0000 Subject: [PATCH 166/533] feat(api): add uploads endpoints (#946) --- .stats.yml | 4 +- api.md | 22 +++ src/index.ts | 6 + src/resources/chat/completions.ts | 1 + src/resources/index.ts | 1 + src/resources/uploads/index.ts | 4 + src/resources/uploads/parts.ts | 68 ++++++++ src/resources/uploads/uploads.ts | 169 ++++++++++++++++++++ tests/api-resources/uploads/parts.test.ts | 30 
++++ tests/api-resources/uploads/uploads.test.ts | 74 +++++++++ 10 files changed, 377 insertions(+), 2 deletions(-) create mode 100644 src/resources/uploads/index.ts create mode 100644 src/resources/uploads/parts.ts create mode 100644 src/resources/uploads/uploads.ts create mode 100644 tests/api-resources/uploads/parts.test.ts create mode 100644 tests/api-resources/uploads/uploads.test.ts diff --git a/.stats.yml b/.stats.yml index 27e2ce5ed..4e4cb5509 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 64 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-518ca6c60061d3e8bc0971facf40d752f2aea62e3522cc168ad29a1f29cab3dd.yml +configured_endpoints: 68 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-77cfff37114bc9f141c7e6107eb5f1b38d8cc99bc3d4ce03a066db2b6b649c69.yml diff --git a/api.md b/api.md index 17a3f9632..ddc9fce38 100644 --- a/api.md +++ b/api.md @@ -388,3 +388,25 @@ Methods: - client.batches.retrieve(batchId) -> Batch - client.batches.list({ ...params }) -> BatchesPage - client.batches.cancel(batchId) -> Batch + +# Uploads + +Types: + +- Upload + +Methods: + +- client.uploads.create({ ...params }) -> Upload +- client.uploads.cancel(uploadId) -> Upload +- client.uploads.complete(uploadId, { ...params }) -> Upload + +## Parts + +Types: + +- UploadPart + +Methods: + +- client.uploads.parts.create(uploadId, { ...params }) -> UploadPart diff --git a/src/index.ts b/src/index.ts index ce455108e..7e5df0505 100644 --- a/src/index.ts +++ b/src/index.ts @@ -164,6 +164,7 @@ export class OpenAI extends Core.APIClient { fineTuning: API.FineTuning = new API.FineTuning(this); beta: API.Beta = new API.Beta(this); batches: API.Batches = new API.Batches(this); + uploads: API.Uploads = new API.Uploads(this); protected override defaultQuery(): Core.DefaultQuery | undefined { return this._options.defaultQuery; @@ -309,6 +310,11 @@ export namespace OpenAI { export import 
BatchCreateParams = API.BatchCreateParams; export import BatchListParams = API.BatchListParams; + export import Uploads = API.Uploads; + export import Upload = API.Upload; + export import UploadCreateParams = API.UploadCreateParams; + export import UploadCompleteParams = API.UploadCompleteParams; + export import ErrorObject = API.ErrorObject; export import FunctionDefinition = API.FunctionDefinition; export import FunctionParameters = API.FunctionParameters; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 44eb9520c..4027e995b 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -820,6 +820,7 @@ export interface ChatCompletionCreateParamsBase { * exhausted. * - If set to 'default', the request will be processed using the default service * tier with a lower uptime SLA and no latency guarentee. + * - When not set, the default behavior is 'auto'. * * When this parameter is set, the response body will include the `service_tier` * utilized. diff --git a/src/resources/index.ts b/src/resources/index.ts index 6f8e8564c..9f2a3cbe7 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -43,3 +43,4 @@ export { } from './images'; export { Model, ModelDeleted, ModelsPage, Models } from './models'; export { Moderation, ModerationCreateResponse, ModerationCreateParams, Moderations } from './moderations'; +export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads/uploads'; diff --git a/src/resources/uploads/index.ts b/src/resources/uploads/index.ts new file mode 100644 index 000000000..1a353d312 --- /dev/null +++ b/src/resources/uploads/index.ts @@ -0,0 +1,4 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads'; +export { UploadPart, PartCreateParams, Parts } from './parts'; diff --git a/src/resources/uploads/parts.ts b/src/resources/uploads/parts.ts new file mode 100644 index 000000000..a4af5c606 --- /dev/null +++ b/src/resources/uploads/parts.ts @@ -0,0 +1,68 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as PartsAPI from './parts'; + +export class Parts extends APIResource { + /** + * Adds a + * [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + * [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + * A Part represents a chunk of bytes from the file you are trying to upload. + * + * Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + * maximum of 8 GB. + * + * It is possible to add multiple Parts in parallel. You can decide the intended + * order of the Parts when you + * [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). + */ + create( + uploadId: string, + body: PartCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post( + `/uploads/${uploadId}/parts`, + Core.multipartFormRequestOptions({ body, ...options }), + ); + } +} + +/** + * The upload Part represents a chunk of bytes we can add to an Upload object. + */ +export interface UploadPart { + /** + * The upload Part unique identifier, which can be referenced in API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the Part was created. + */ + created_at: number; + + /** + * The object type, which is always `upload.part`. + */ + object: 'upload.part'; + + /** + * The ID of the Upload object that this Part was added to. 
+ */ + upload_id: string; +} + +export interface PartCreateParams { + /** + * The chunk of bytes for this Part. + */ + data: Core.Uploadable; +} + +export namespace Parts { + export import UploadPart = PartsAPI.UploadPart; + export import PartCreateParams = PartsAPI.PartCreateParams; +} diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts new file mode 100644 index 000000000..ceb2b6d23 --- /dev/null +++ b/src/resources/uploads/uploads.ts @@ -0,0 +1,169 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as Core from '../../core'; +import * as UploadsAPI from './uploads'; +import * as FilesAPI from '../files'; +import * as PartsAPI from './parts'; + +export class Uploads extends APIResource { + parts: PartsAPI.Parts = new PartsAPI.Parts(this._client); + + /** + * Creates an intermediate + * [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + * that you can add + * [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. + * Currently, an Upload can accept at most 8 GB in total and expires after an hour + * after you create it. + * + * Once you complete the Upload, we will create a + * [File](https://platform.openai.com/docs/api-reference/files/object) object that + * contains all the parts you uploaded. This File is usable in the rest of our + * platform as a regular File object. + * + * For certain `purpose`s, the correct `mime_type` must be specified. Please refer + * to documentation for the supported MIME types for your use case: + * + * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + * + * For guidance on the proper filename extensions for each purpose, please follow + * the documentation on + * [creating a File](https://platform.openai.com/docs/api-reference/files/create). 
+ */ + create(body: UploadCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/uploads', { body, ...options }); + } + + /** + * Cancels the Upload. No Parts may be added after an Upload is cancelled. + */ + cancel(uploadId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/uploads/${uploadId}/cancel`, options); + } + + /** + * Completes the + * [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + * + * Within the returned Upload object, there is a nested + * [File](https://platform.openai.com/docs/api-reference/files/object) object that + * is ready to use in the rest of the platform. + * + * You can specify the order of the Parts by passing in an ordered list of the Part + * IDs. + * + * The number of bytes uploaded upon completion must match the number of bytes + * initially specified when creating the Upload object. No Parts may be added after + * an Upload is completed. + */ + complete( + uploadId: string, + body: UploadCompleteParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/uploads/${uploadId}/complete`, { body, ...options }); + } +} + +/** + * The Upload object can accept byte chunks in the form of Parts. + */ +export interface Upload { + /** + * The Upload unique identifier, which can be referenced in API endpoints. + */ + id: string; + + /** + * The intended number of bytes to be uploaded. + */ + bytes: number; + + /** + * The Unix timestamp (in seconds) for when the Upload was created. + */ + created_at: number; + + /** + * The Unix timestamp (in seconds) for when the Upload was created. + */ + expires_at: number; + + /** + * The name of the file to be uploaded. + */ + filename: string; + + /** + * The object type, which is always "upload". + */ + object: 'upload'; + + /** + * The intended purpose of the file. 
+ * [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + * for acceptable values. + */ + purpose: string; + + /** + * The status of the Upload. + */ + status: 'pending' | 'completed' | 'cancelled' | 'expired'; + + /** + * The ready File object after the Upload is completed. + */ + file?: FilesAPI.FileObject | null; +} + +export interface UploadCreateParams { + /** + * The number of bytes in the file you are uploading. + */ + bytes: number; + + /** + * The name of the file to upload. + */ + filename: string; + + /** + * The MIME type of the file. + * + * This must fall within the supported MIME types for your file purpose. See the + * supported MIME types for assistants and vision. + */ + mime_type: string; + + /** + * The intended purpose of the uploaded file. + * + * See the + * [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + */ + purpose: 'assistants' | 'batch' | 'fine-tune' | 'vision'; +} + +export interface UploadCompleteParams { + /** + * The ordered list of Part IDs. + */ + part_ids: Array; + + /** + * The optional md5 checksum for the file contents to verify if the bytes uploaded + * matches what you expect. + */ + md5?: string; +} + +export namespace Uploads { + export import Upload = UploadsAPI.Upload; + export import UploadCreateParams = UploadsAPI.UploadCreateParams; + export import UploadCompleteParams = UploadsAPI.UploadCompleteParams; + export import Parts = PartsAPI.Parts; + export import UploadPart = PartsAPI.UploadPart; + export import PartCreateParams = PartsAPI.PartCreateParams; +} diff --git a/tests/api-resources/uploads/parts.test.ts b/tests/api-resources/uploads/parts.test.ts new file mode 100644 index 000000000..5e69c5861 --- /dev/null +++ b/tests/api-resources/uploads/parts.test.ts @@ -0,0 +1,30 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import OpenAI, { toFile } from 'openai'; +import { Response } from 'node-fetch'; + +const openai = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource parts', () => { + test('create: only required params', async () => { + const responsePromise = openai.uploads.parts.create('upload_abc123', { + data: await toFile(Buffer.from('# my file contents'), 'README.md'), + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await openai.uploads.parts.create('upload_abc123', { + data: await toFile(Buffer.from('# my file contents'), 'README.md'), + }); + }); +}); diff --git a/tests/api-resources/uploads/uploads.test.ts b/tests/api-resources/uploads/uploads.test.ts new file mode 100644 index 000000000..08f059d1b --- /dev/null +++ b/tests/api-resources/uploads/uploads.test.ts @@ -0,0 +1,74 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const openai = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource uploads', () => { + test('create: only required params', async () => { + const responsePromise = openai.uploads.create({ + bytes: 0, + filename: 'filename', + mime_type: 'mime_type', + purpose: 'assistants', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await openai.uploads.create({ + bytes: 0, + filename: 'filename', + mime_type: 'mime_type', + purpose: 'assistants', + }); + }); + + test('cancel', async () => { + const responsePromise = openai.uploads.cancel('upload_abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + openai.uploads.cancel('upload_abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('complete: only required params', async () => { + const responsePromise = openai.uploads.complete('upload_abc123', { + part_ids: ['string', 'string', 'string'], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; 
+ expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('complete: required and optional params', async () => { + const response = await openai.uploads.complete('upload_abc123', { + part_ids: ['string', 'string', 'string'], + md5: 'md5', + }); + }); +}); From 22896e88e97c0e8621bf74c666a6fc7d9d832267 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:12:13 +0000 Subject: [PATCH 167/533] release: 4.53.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 20 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index dc058ce75..bdcacbf65 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.52.7" + ".": "4.53.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7bff5e4eb..00fc14e39 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.53.0 (2024-07-22) + +Full Changelog: [v4.52.7...v4.53.0](https://github.com/openai/openai-node/compare/v4.52.7...v4.53.0) + +### Features + +* **api:** add new gpt-4o-mini models ([#942](https://github.com/openai/openai-node/issues/942)) ([7ac10dd](https://github.com/openai/openai-node/commit/7ac10ddbb87e9eb0e8e34d58a13a4775cbba1c24)) +* **api:** add uploads endpoints ([#946](https://github.com/openai/openai-node/issues/946)) ([8709ceb](https://github.com/openai/openai-node/commit/8709ceb0e01c5a1f96704c998f35ca1117ecadac)) + + +### Chores + +* **docs:** mention support of web browser runtimes ([#938](https://github.com/openai/openai-node/issues/938)) 
([123d19d](https://github.com/openai/openai-node/commit/123d19d5a157110c8ada556c107caf0eb8b2ccc6)) +* **docs:** use client instead of package name in Node examples ([#941](https://github.com/openai/openai-node/issues/941)) ([8b5db1f](https://github.com/openai/openai-node/commit/8b5db1f53e66ce4b6e554f40a8dd2fd474085027)) + ## 4.52.7 (2024-07-11) Full Changelog: [v4.52.6...v4.52.7](https://github.com/openai/openai-node/compare/v4.52.6...v4.52.7) diff --git a/README.md b/README.md index 3d303e5b0..bf6f5ac76 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.52.7/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.53.0/mod.ts'; ``` diff --git a/package.json b/package.json index 1f00b2180..40d45a905 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.52.7", + "version": "4.53.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index ba47751d8..930f16d53 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.52.7/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.53.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index b9a220285..0f53607fc 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.52.7'; // x-release-please-version +export const VERSION = '4.53.0'; // x-release-please-version From 5b7677120b4fef41c0eb78dbf6c8a6bf480b2028 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 23 Jul 2024 10:34:27 +0000 Subject: [PATCH 168/533] chore(tests): update 
prism version (#948) --- scripts/mock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock b/scripts/mock index fe89a1d08..f58615769 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stoplight/prism-cli@~5.8 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" fi From f7b35843abc491b42f908a171256200116858195 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 25 Jul 2024 16:34:53 +0000 Subject: [PATCH 169/533] fix(compat): remove ReadableStream polyfill redundant since node v16 (#954) Note that this breaks streaming in Node v14, which has been unsupported since v4.0.0 of this library. 
--- package.json | 3 +-- src/_shims/node-runtime.ts | 4 +--- yarn.lock | 5 ----- 3 files changed, 2 insertions(+), 10 deletions(-) diff --git a/package.json b/package.json index 40d45a905..1ea96ee7b 100644 --- a/package.json +++ b/package.json @@ -30,8 +30,7 @@ "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7", - "web-streams-polyfill": "^3.2.1" + "node-fetch": "^2.6.7" }, "devDependencies": { "@swc/core": "^1.3.102", diff --git a/src/_shims/node-runtime.ts b/src/_shims/node-runtime.ts index a9c42ebeb..ab9f2ab5c 100644 --- a/src/_shims/node-runtime.ts +++ b/src/_shims/node-runtime.ts @@ -13,9 +13,7 @@ import { Readable } from 'node:stream'; import { type RequestOptions } from '../core'; import { MultipartBody } from './MultipartBody'; import { type Shims } from './registry'; - -// @ts-ignore (this package does not have proper export maps for this export) -import { ReadableStream } from 'web-streams-polyfill/dist/ponyfill.es2018.js'; +import { ReadableStream } from 'node:stream/web'; type FileFromPathOptions = Omit; diff --git a/yarn.lock b/yarn.lock index dda4d2e4a..358dbf20b 100644 --- a/yarn.lock +++ b/yarn.lock @@ -3412,11 +3412,6 @@ web-streams-polyfill@4.0.0-beta.1: resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.1.tgz#3b19b9817374b7cee06d374ba7eeb3aeb80e8c95" integrity sha512-3ux37gEX670UUphBF9AMCq8XM6iQ8Ac6A+DSRRjDoRBm1ufCkaCDdNVbaqq60PsEkdNlLKrGtv/YBP4EJXqNtQ== -web-streams-polyfill@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz#71c2718c52b45fd49dbeee88634b3a60ceab42a6" - integrity sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q== - webidl-conversions@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" From 
83c1d179ebbe94a34a70cfbe0aa3072ed15b7cea Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 25 Jul 2024 16:35:18 +0000 Subject: [PATCH 170/533] release: 4.53.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index bdcacbf65..89bf40edf 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.53.0" + ".": "4.53.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 00fc14e39..c57f8b1e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.53.1 (2024-07-25) + +Full Changelog: [v4.53.0...v4.53.1](https://github.com/openai/openai-node/compare/v4.53.0...v4.53.1) + +### Bug Fixes + +* **compat:** remove ReadableStream polyfill redundant since node v16 ([#954](https://github.com/openai/openai-node/issues/954)) ([78b2a83](https://github.com/openai/openai-node/commit/78b2a83f085bb7ddf6a5f429636de1e3eef20f9d)) + + +### Chores + +* **tests:** update prism version ([#948](https://github.com/openai/openai-node/issues/948)) ([9202c91](https://github.com/openai/openai-node/commit/9202c91d697a116eb1b834e01f4073d254438149)) + ## 4.53.0 (2024-07-22) Full Changelog: [v4.52.7...v4.53.0](https://github.com/openai/openai-node/compare/v4.52.7...v4.53.0) diff --git a/README.md b/README.md index bf6f5ac76..06629fdda 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.53.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.53.1/mod.ts'; ``` diff --git a/package.json b/package.json index 1ea96ee7b..590d497c5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": 
"4.53.0", + "version": "4.53.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 930f16d53..1952d4282 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.53.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.53.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 0f53607fc..7fc8bb1d7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.53.0'; // x-release-please-version +export const VERSION = '4.53.1'; // x-release-please-version From f696b2a11399efbc5b4b17dc9d0f287dc3ba5e1a Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 26 Jul 2024 12:01:07 +0000 Subject: [PATCH 171/533] chore(docs): fix incorrect client var names (#955) --- README.md | 32 +++++++++---------- tests/api-resources/audio/speech.test.ts | 4 +-- .../audio/transcriptions.test.ts | 6 ++-- .../api-resources/audio/translations.test.ts | 6 ++-- tests/api-resources/batches.test.ts | 20 ++++++------ tests/api-resources/beta/assistants.test.ts | 22 ++++++------- .../beta/threads/messages.test.ts | 22 ++++++------- .../beta/threads/runs/runs.test.ts | 26 +++++++-------- .../beta/threads/runs/steps.test.ts | 12 +++---- .../beta/threads/threads.test.ts | 22 ++++++------- .../beta/vector-stores/file-batches.test.ts | 20 ++++++------ .../beta/vector-stores/files.test.ts | 20 ++++++------ .../beta/vector-stores/vector-stores.test.ts | 20 ++++++------ tests/api-resources/chat/completions.test.ts | 6 ++-- tests/api-resources/completions.test.ts | 6 ++-- tests/api-resources/embeddings.test.ts | 6 ++-- tests/api-resources/files.test.ts | 26 +++++++-------- .../fine-tuning/jobs/checkpoints.test.ts | 8 ++--- 
.../fine-tuning/jobs/jobs.test.ts | 26 +++++++-------- tests/api-resources/images.test.ts | 14 ++++---- tests/api-resources/models.test.ts | 14 ++++---- tests/api-resources/moderations.test.ts | 6 ++-- tests/api-resources/uploads/parts.test.ts | 6 ++-- tests/api-resources/uploads/uploads.test.ts | 14 ++++---- 24 files changed, 182 insertions(+), 182 deletions(-) diff --git a/README.md b/README.md index 06629fdda..88c132a4f 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ const client = new OpenAI({ }); async function main() { - const chatCompletion = await openai.chat.completions.create({ + const chatCompletion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo', }); @@ -56,7 +56,7 @@ import OpenAI from 'openai'; const client = new OpenAI(); async function main() { - const stream = await openai.chat.completions.create({ + const stream = await client.chat.completions.create({ model: 'gpt-4', messages: [{ role: 'user', content: 'Say this is a test' }], stream: true, @@ -89,7 +89,7 @@ async function main() { messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo', }; - const chatCompletion: OpenAI.Chat.ChatCompletion = await openai.chat.completions.create(params); + const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); } main(); @@ -304,20 +304,20 @@ import OpenAI, { toFile } from 'openai'; const client = new OpenAI(); // If you have access to Node `fs` we recommend using `fs.createReadStream()`: -await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' }); +await client.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' }); // Or if you have the web `File` API you can pass a `File` instance: -await openai.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' }); +await client.files.create({ file: new File(['my bytes'], 
'input.jsonl'), purpose: 'fine-tune' }); // You can also pass a `fetch` `Response`: -await openai.files.create({ file: await fetch('http://github.com/service/https://somesite/input.jsonl'), purpose: 'fine-tune' }); +await client.files.create({ file: await fetch('http://github.com/service/https://somesite/input.jsonl'), purpose: 'fine-tune' }); // Finally, if none of the above are convenient, you can use our `toFile` helper: -await openai.files.create({ +await client.files.create({ file: await toFile(Buffer.from('my bytes'), 'input.jsonl'), purpose: 'fine-tune', }); -await openai.files.create({ +await client.files.create({ file: await toFile(new Uint8Array([0, 1, 2]), 'input.jsonl'), purpose: 'fine-tune', }); @@ -332,7 +332,7 @@ a subclass of `APIError` will be thrown: ```ts async function main() { - const job = await openai.fineTuning.jobs + const job = await client.fineTuning.jobs .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' }) .catch(async (err) => { if (err instanceof OpenAI.APIError) { @@ -404,7 +404,7 @@ const client = new OpenAI({ }); // Or, configure per-request: -await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, { +await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, { maxRetries: 5, }); ``` @@ -421,7 +421,7 @@ const client = new OpenAI({ }); // Override per-request: -await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, { +await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' 
}], model: 'gpt-3.5-turbo' }, { timeout: 5 * 1000, }); ``` @@ -439,7 +439,7 @@ You can use `for await … of` syntax to iterate through items across all pages: async function fetchAllFineTuningJobs(params) { const allFineTuningJobs = []; // Automatically fetches more pages as needed. - for await (const fineTuningJob of openai.fineTuning.jobs.list({ limit: 20 })) { + for await (const fineTuningJob of client.fineTuning.jobs.list({ limit: 20 })) { allFineTuningJobs.push(fineTuningJob); } return allFineTuningJobs; @@ -449,7 +449,7 @@ async function fetchAllFineTuningJobs(params) { Alternatively, you can make request a single page at a time: ```ts -let page = await openai.fineTuning.jobs.list({ limit: 20 }); +let page = await client.fineTuning.jobs.list({ limit: 20 }); for (const fineTuningJob of page.data) { console.log(fineTuningJob); } @@ -473,13 +473,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi ```ts const client = new OpenAI(); -const response = await openai.chat.completions +const response = await client.chat.completions .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) .asResponse(); console.log(response.headers.get('X-My-Header')); console.log(response.statusText); // access the underlying Response object -const { data: chatCompletion, response: raw } = await openai.chat.completions +const { data: chatCompletion, response: raw } = await client.chat.completions .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); @@ -587,7 +587,7 @@ const client = new OpenAI({ }); // Override per-request: -await openai.models.list({ +await client.models.list({ httpAgent: new http.Agent({ keepAlive: false }), }); ``` diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index 7509c19ca..904d75e5d 100644 --- a/tests/api-resources/audio/speech.test.ts 
+++ b/tests/api-resources/audio/speech.test.ts @@ -2,7 +2,7 @@ import OpenAI from 'openai'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); @@ -10,7 +10,7 @@ const openai = new OpenAI({ describe('resource speech', () => { // binary tests are currently broken test.skip('create: required and optional params', async () => { - const response = await openai.audio.speech.create({ + const response = await client.audio.speech.create({ input: 'input', model: 'string', voice: 'alloy', diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index 938ddd2b3..ef2797911 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -3,14 +3,14 @@ import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource transcriptions', () => { test('create: only required params', async () => { - const responsePromise = openai.audio.transcriptions.create({ + const responsePromise = client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'whisper-1', }); @@ -24,7 +24,7 @@ describe('resource transcriptions', () => { }); test('create: required and optional params', async () => { - const response = await openai.audio.transcriptions.create({ + const response = await client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'whisper-1', language: 'language', diff --git a/tests/api-resources/audio/translations.test.ts b/tests/api-resources/audio/translations.test.ts index 3f05bc90f..8264a5818 100644 --- a/tests/api-resources/audio/translations.test.ts +++ b/tests/api-resources/audio/translations.test.ts @@ -3,14 +3,14 @@ import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource translations', () => { test('create: only required params', async () => { - const responsePromise = openai.audio.translations.create({ + const responsePromise = client.audio.translations.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'whisper-1', }); @@ -24,7 +24,7 @@ describe('resource translations', () => { }); test('create: required and optional params', async () => { - const response = await openai.audio.translations.create({ + const response = await client.audio.translations.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'whisper-1', prompt: 'prompt', diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts index 2861298a8..96e200fb9 100644 --- a/tests/api-resources/batches.test.ts +++ b/tests/api-resources/batches.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource batches', () => { test('create: only required params', async () => { - const responsePromise = openai.batches.create({ + const responsePromise = client.batches.create({ completion_window: '24h', endpoint: '/v1/chat/completions', input_file_id: 'input_file_id', @@ -25,7 +25,7 @@ describe('resource batches', () => { }); test('create: required and optional params', async () => { - const response = await openai.batches.create({ + const response = await client.batches.create({ completion_window: '24h', endpoint: '/v1/chat/completions', input_file_id: 'input_file_id', @@ -34,7 +34,7 @@ describe('resource batches', () => { }); test('retrieve', async () => { - const responsePromise = openai.batches.retrieve('batch_id'); + const responsePromise = client.batches.retrieve('batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -46,13 +46,13 @@ describe('resource batches', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('list', async () => { - const responsePromise = openai.batches.list(); + const responsePromise = client.batches.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -64,7 +64,7 @@ describe('resource batches', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to 
cause an error - await expect(openai.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -72,12 +72,12 @@ describe('resource batches', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }), + client.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = openai.batches.cancel('batch_id'); + const responsePromise = client.batches.cancel('batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -89,7 +89,7 @@ describe('resource batches', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index 44ee2921d..657cd76a6 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource assistants', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.assistants.create({ model: 'gpt-4-turbo' }); + const responsePromise = client.beta.assistants.create({ model: 'gpt-4-turbo' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource assistants', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.assistants.create({ + const response = await client.beta.assistants.create({ model: 'gpt-4-turbo', description: 'description', instructions: 'instructions', @@ -44,7 +44,7 @@ describe('resource assistants', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.assistants.retrieve('assistant_id'); + const responsePromise = client.beta.assistants.retrieve('assistant_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -57,12 +57,12 @@ describe('resource assistants', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }), + client.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.assistants.update('assistant_id', {}); + const responsePromise = client.beta.assistants.update('assistant_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -73,7 +73,7 @@ 
describe('resource assistants', () => { }); test('list', async () => { - const responsePromise = openai.beta.assistants.list(); + const responsePromise = client.beta.assistants.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -85,7 +85,7 @@ describe('resource assistants', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -93,7 +93,7 @@ describe('resource assistants', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.assistants.list( + client.beta.assistants.list( { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), @@ -101,7 +101,7 @@ describe('resource assistants', () => { }); test('del', async () => { - const responsePromise = openai.beta.assistants.del('assistant_id'); + const responsePromise = client.beta.assistants.del('assistant_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -114,7 +114,7 @@ describe('resource assistants', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }), + 
client.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index 0f2877af1..bfbcab1cb 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource messages', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.threads.messages.create('thread_id', { + const responsePromise = client.beta.threads.messages.create('thread_id', { content: 'string', role: 'user', }); @@ -24,7 +24,7 @@ describe('resource messages', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.threads.messages.create('thread_id', { + const response = await client.beta.threads.messages.create('thread_id', { content: 'string', role: 'user', attachments: [ @@ -46,7 +46,7 @@ describe('resource messages', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.threads.messages.retrieve('thread_id', 'message_id'); + const responsePromise = client.beta.threads.messages.retrieve('thread_id', 'message_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -59,12 +59,12 @@ describe('resource messages', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.messages.retrieve('thread_id', 
'message_id', { path: '/_stainless_unknown_path' }), + client.beta.threads.messages.retrieve('thread_id', 'message_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.threads.messages.update('thread_id', 'message_id', {}); + const responsePromise = client.beta.threads.messages.update('thread_id', 'message_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -75,7 +75,7 @@ describe('resource messages', () => { }); test('list', async () => { - const responsePromise = openai.beta.threads.messages.list('thread_id'); + const responsePromise = client.beta.threads.messages.list('thread_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -88,14 +88,14 @@ describe('resource messages', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.messages.list('thread_id', { path: '/_stainless_unknown_path' }), + client.beta.threads.messages.list('thread_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.messages.list( + client.beta.threads.messages.list( 'thread_id', { after: 'after', before: 'before', limit: 0, order: 'asc', run_id: 'run_id' }, { path: '/_stainless_unknown_path' }, @@ -104,7 +104,7 @@ describe('resource messages', () => { }); test('del', async () => { - const responsePromise = 
openai.beta.threads.messages.del('thread_id', 'message_id'); + const responsePromise = client.beta.threads.messages.del('thread_id', 'message_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -117,7 +117,7 @@ describe('resource messages', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.messages.del('thread_id', 'message_id', { path: '/_stainless_unknown_path' }), + client.beta.threads.messages.del('thread_id', 'message_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index b422a9a3f..856eb8662 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource runs', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.threads.runs.create('thread_id', { assistant_id: 'assistant_id' }); + const responsePromise = client.beta.threads.runs.create('thread_id', { assistant_id: 'assistant_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource runs', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.threads.runs.create('thread_id', { + const response = await client.beta.threads.runs.create('thread_id', { assistant_id: 'assistant_id', additional_instructions: 'additional_instructions', additional_messages: [ @@ -136,7 +136,7 @@ describe('resource runs', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.threads.runs.retrieve('thread_id', 'run_id'); + const responsePromise = client.beta.threads.runs.retrieve('thread_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -149,12 +149,12 @@ describe('resource runs', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.retrieve('thread_id', 'run_id', { path: '/_stainless_unknown_path' }), + client.beta.threads.runs.retrieve('thread_id', 'run_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.threads.runs.update('thread_id', 'run_id', {}); + const responsePromise = client.beta.threads.runs.update('thread_id', 'run_id', {}); const rawResponse = await 
responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -165,7 +165,7 @@ describe('resource runs', () => { }); test('list', async () => { - const responsePromise = openai.beta.threads.runs.list('thread_id'); + const responsePromise = client.beta.threads.runs.list('thread_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -178,14 +178,14 @@ describe('resource runs', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.list('thread_id', { path: '/_stainless_unknown_path' }), + client.beta.threads.runs.list('thread_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.list( + client.beta.threads.runs.list( 'thread_id', { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, @@ -194,7 +194,7 @@ describe('resource runs', () => { }); test('cancel', async () => { - const responsePromise = openai.beta.threads.runs.cancel('thread_id', 'run_id'); + const responsePromise = client.beta.threads.runs.cancel('thread_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -207,12 +207,12 @@ describe('resource runs', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an 
error await expect( - openai.beta.threads.runs.cancel('thread_id', 'run_id', { path: '/_stainless_unknown_path' }), + client.beta.threads.runs.cancel('thread_id', 'run_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('submitToolOutputs: only required params', async () => { - const responsePromise = openai.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { + const responsePromise = client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { tool_outputs: [{}, {}, {}], }); const rawResponse = await responsePromise.asResponse(); @@ -225,7 +225,7 @@ describe('resource runs', () => { }); test('submitToolOutputs: required and optional params', async () => { - const response = await openai.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { + const response = await client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { tool_outputs: [ { tool_call_id: 'tool_call_id', output: 'output' }, { tool_call_id: 'tool_call_id', output: 'output' }, diff --git a/tests/api-resources/beta/threads/runs/steps.test.ts b/tests/api-resources/beta/threads/runs/steps.test.ts index 1981d67fd..21487c17b 100644 --- a/tests/api-resources/beta/threads/runs/steps.test.ts +++ b/tests/api-resources/beta/threads/runs/steps.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource steps', () => { test('retrieve', async () => { - const responsePromise = openai.beta.threads.runs.steps.retrieve('thread_id', 'run_id', 'step_id'); + const responsePromise = client.beta.threads.runs.steps.retrieve('thread_id', 'run_id', 'step_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,14 +23,14 @@ describe('resource steps', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.steps.retrieve('thread_id', 'run_id', 'step_id', { + client.beta.threads.runs.steps.retrieve('thread_id', 'run_id', 'step_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list', async () => { - const responsePromise = openai.beta.threads.runs.steps.list('thread_id', 'run_id'); + const responsePromise = client.beta.threads.runs.steps.list('thread_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,14 +43,14 @@ describe('resource steps', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.runs.steps.list('thread_id', 'run_id', { path: '/_stainless_unknown_path' }), + client.beta.threads.runs.steps.list('thread_id', 'run_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to 
cause an error await expect( - openai.beta.threads.runs.steps.list( + client.beta.threads.runs.steps.list( 'thread_id', 'run_id', { after: 'after', before: 'before', limit: 0, order: 'asc' }, diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 0d2d93a61..2a5ebfd82 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource threads', () => { test('create', async () => { - const responsePromise = openai.beta.threads.create(); + const responsePromise = client.beta.threads.create(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -22,7 +22,7 @@ describe('resource threads', () => { test('create: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.beta.threads.create({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.beta.threads.create({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -30,7 +30,7 @@ describe('resource threads', () => { test('create: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.create( + client.beta.threads.create( { messages: [ { @@ -148,7 +148,7 @@ describe('resource threads', () => { }); test('retrieve', async () => { - const responsePromise = 
openai.beta.threads.retrieve('thread_id'); + const responsePromise = client.beta.threads.retrieve('thread_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -161,12 +161,12 @@ describe('resource threads', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.threads.retrieve('thread_id', { path: '/_stainless_unknown_path' }), + client.beta.threads.retrieve('thread_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.threads.update('thread_id', {}); + const responsePromise = client.beta.threads.update('thread_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -177,7 +177,7 @@ describe('resource threads', () => { }); test('del', async () => { - const responsePromise = openai.beta.threads.del('thread_id'); + const responsePromise = client.beta.threads.del('thread_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -189,13 +189,13 @@ describe('resource threads', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.beta.threads.del('thread_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.beta.threads.del('thread_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('createAndRun: only required params', async () => { - const responsePromise = 
openai.beta.threads.createAndRun({ assistant_id: 'assistant_id' }); + const responsePromise = client.beta.threads.createAndRun({ assistant_id: 'assistant_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -206,7 +206,7 @@ describe('resource threads', () => { }); test('createAndRun: required and optional params', async () => { - const response = await openai.beta.threads.createAndRun({ + const response = await client.beta.threads.createAndRun({ assistant_id: 'assistant_id', instructions: 'instructions', max_completion_tokens: 256, diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/beta/vector-stores/file-batches.test.ts index 33bfd2ef7..b714049b4 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/beta/vector-stores/file-batches.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource fileBatches', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.vectorStores.fileBatches.create('vs_abc123', { + const responsePromise = client.beta.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'], }); const rawResponse = await responsePromise.asResponse(); @@ -23,14 +23,14 @@ describe('resource fileBatches', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.vectorStores.fileBatches.create('vs_abc123', { + const response = await client.beta.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'], chunking_strategy: { type: 'auto' }, }); }); test('retrieve', async () => { - const responsePromise = openai.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); + const responsePromise = client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,14 +43,14 @@ describe('resource fileBatches', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { + client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = openai.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); + const responsePromise = client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await 
responsePromise; @@ -63,14 +63,14 @@ describe('resource fileBatches', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { + client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('listFiles', async () => { - const responsePromise = openai.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); + const responsePromise = client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -83,7 +83,7 @@ describe('resource fileBatches', () => { test('listFiles: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { + client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); @@ -92,7 +92,7 @@ describe('resource fileBatches', () => { test('listFiles: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.fileBatches.listFiles( + client.beta.vectorStores.fileBatches.listFiles( 'vector_store_id', 'batch_id', { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, diff --git a/tests/api-resources/beta/vector-stores/files.test.ts 
b/tests/api-resources/beta/vector-stores/files.test.ts index 4b21aed30..7c14d4de3 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/beta/vector-stores/files.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource files', () => { test('create: only required params', async () => { - const responsePromise = openai.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const responsePromise = client.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,14 +21,14 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await openai.beta.vectorStores.files.create('vs_abc123', { + const response = await client.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id', chunking_strategy: { type: 'auto' }, }); }); test('retrieve', async () => { - const responsePromise = openai.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const responsePromise = client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -41,14 +41,14 @@ describe('resource files', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { + client.beta.vectorStores.files.retrieve('vs_abc123', 
'file-abc123', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list', async () => { - const responsePromise = openai.beta.vectorStores.files.list('vector_store_id'); + const responsePromise = client.beta.vectorStores.files.list('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -61,14 +61,14 @@ describe('resource files', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), + client.beta.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.files.list( + client.beta.vectorStores.files.list( 'vector_store_id', { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, @@ -77,7 +77,7 @@ describe('resource files', () => { }); test('del', async () => { - const responsePromise = openai.beta.vectorStores.files.del('vector_store_id', 'file_id'); + const responsePromise = client.beta.vectorStores.files.del('vector_store_id', 'file_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -90,7 +90,7 @@ describe('resource files', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP 
method in order to cause an error await expect( - openai.beta.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + client.beta.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/beta/vector-stores/vector-stores.test.ts index 11dcceef8..806098de8 100644 --- a/tests/api-resources/beta/vector-stores/vector-stores.test.ts +++ b/tests/api-resources/beta/vector-stores/vector-stores.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource vectorStores', () => { test('create', async () => { - const responsePromise = openai.beta.vectorStores.create({}); + const responsePromise = client.beta.vectorStores.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource vectorStores', () => { }); test('retrieve', async () => { - const responsePromise = openai.beta.vectorStores.retrieve('vector_store_id'); + const responsePromise = client.beta.vectorStores.retrieve('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -34,12 +34,12 @@ describe('resource vectorStores', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), + 
client.beta.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = openai.beta.vectorStores.update('vector_store_id', {}); + const responsePromise = client.beta.vectorStores.update('vector_store_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -50,7 +50,7 @@ describe('resource vectorStores', () => { }); test('list', async () => { - const responsePromise = openai.beta.vectorStores.list(); + const responsePromise = client.beta.vectorStores.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -62,7 +62,7 @@ describe('resource vectorStores', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.beta.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.beta.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -70,7 +70,7 @@ describe('resource vectorStores', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.list( + client.beta.vectorStores.list( { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), @@ -78,7 +78,7 @@ describe('resource vectorStores', () => { }); test('del', async () => { - const responsePromise = openai.beta.vectorStores.del('vector_store_id'); + const responsePromise = client.beta.vectorStores.del('vector_store_id'); const 
rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -91,7 +91,7 @@ describe('resource vectorStores', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.beta.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), + client.beta.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 66ef2d023..78314074f 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource completions', () => { test('create: only required params', async () => { - const responsePromise = openai.chat.completions.create({ + const responsePromise = client.chat.completions.create({ messages: [{ content: 'content', role: 'system' }], model: 'gpt-4-turbo', }); @@ -24,7 +24,7 @@ describe('resource completions', () => { }); test('create: required and optional params', async () => { - const response = await openai.chat.completions.create({ + const response = await client.chat.completions.create({ messages: [{ content: 'content', role: 'system', name: 'name' }], model: 'gpt-4-turbo', frequency_penalty: -2, diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index f78f7a593..82322dc3a 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource completions', () => { test('create: only required params', async () => { - const responsePromise = openai.completions.create({ model: 'string', prompt: 'This is a test.' }); + const responsePromise = client.completions.create({ model: 'string', prompt: 'This is a test.' 
}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource completions', () => { }); test('create: required and optional params', async () => { - const response = await openai.completions.create({ + const response = await client.completions.create({ model: 'string', prompt: 'This is a test.', best_of: 0, diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index d4e1f3240..46dd1b2a3 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource embeddings', () => { test('create: only required params', async () => { - const responsePromise = openai.embeddings.create({ + const responsePromise = client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', }); @@ -24,7 +24,7 @@ describe('resource embeddings', () => { }); test('create: required and optional params', async () => { - const response = await openai.embeddings.create({ + const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', dimensions: 1, diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index 55eded995..bbaa45a65 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -3,14 +3,14 @@ import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource files', () => { test('create: only required params', async () => { - const responsePromise = openai.files.create({ + const responsePromise = client.files.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), purpose: 'assistants', }); @@ -24,14 +24,14 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await openai.files.create({ + const response = await client.files.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), purpose: 'assistants', }); }); test('retrieve', async () => { - const responsePromise = openai.files.retrieve('file_id'); + const responsePromise = client.files.retrieve('file_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,13 +43,13 @@ describe('resource files', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.files.retrieve('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.files.retrieve('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('list', async () => { - const responsePromise = openai.files.list(); + const responsePromise = client.files.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -61,7 +61,7 @@ describe('resource files', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.files.list({ path: 
'/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.files.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -69,12 +69,12 @@ describe('resource files', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.files.list({ purpose: 'purpose' }, { path: '/_stainless_unknown_path' }), + client.files.list({ purpose: 'purpose' }, { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = openai.files.del('file_id'); + const responsePromise = client.files.del('file_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -86,20 +86,20 @@ describe('resource files', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.files.del('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.files.del('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('content: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.files.content('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.files.content('file_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('retrieveContent', async () => { - const responsePromise = openai.files.retrieveContent('file_id'); + const responsePromise = 
client.files.retrieveContent('file_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -112,7 +112,7 @@ describe('resource files', () => { test('retrieveContent: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.files.retrieveContent('file_id', { path: '/_stainless_unknown_path' }), + client.files.retrieveContent('file_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts b/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts index 3a01448e2..d211a9b10 100644 --- a/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts +++ b/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource checkpoints', () => { test('list', async () => { - const responsePromise = openai.fineTuning.jobs.checkpoints.list('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const responsePromise = client.fineTuning.jobs.checkpoints.list('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,7 +23,7 @@ describe('resource checkpoints', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.fineTuning.jobs.checkpoints.list('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { + client.fineTuning.jobs.checkpoints.list('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); @@ -32,7 +32,7 @@ describe('resource checkpoints', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.fineTuning.jobs.checkpoints.list( + client.fineTuning.jobs.checkpoints.list( 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', { after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }, diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index c14912c3a..04de7ee21 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource jobs', () => { test('create: only required params', async () => { - const responsePromise = openai.fineTuning.jobs.create({ + const responsePromise = client.fineTuning.jobs.create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123', }); @@ -24,7 +24,7 @@ describe('resource jobs', () => { }); test('create: required and optional params', async () => { - const response = await openai.fineTuning.jobs.create({ + const response = await client.fineTuning.jobs.create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123', hyperparameters: { batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' }, @@ -64,7 +64,7 @@ describe('resource jobs', () => { }); test('retrieve', async () => { - const responsePromise = openai.fineTuning.jobs.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const responsePromise = client.fineTuning.jobs.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -77,12 +77,12 @@ describe('resource jobs', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.fineTuning.jobs.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + client.fineTuning.jobs.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list', async () => { - const responsePromise = openai.fineTuning.jobs.list(); + const responsePromise = client.fineTuning.jobs.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -94,7 +94,7 @@ describe('resource jobs', () => { test('list: request options instead of params are 
passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.fineTuning.jobs.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.fineTuning.jobs.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -102,12 +102,12 @@ describe('resource jobs', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.fineTuning.jobs.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }), + client.fineTuning.jobs.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = openai.fineTuning.jobs.cancel('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const responsePromise = client.fineTuning.jobs.cancel('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -120,12 +120,12 @@ describe('resource jobs', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.fineTuning.jobs.cancel('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + client.fineTuning.jobs.cancel('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('listEvents', async () => { - const responsePromise = openai.fineTuning.jobs.listEvents('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const responsePromise = client.fineTuning.jobs.listEvents('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); const rawResponse = await 
responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -138,14 +138,14 @@ describe('resource jobs', () => { test('listEvents: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.fineTuning.jobs.listEvents('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + client.fineTuning.jobs.listEvents('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('listEvents: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.fineTuning.jobs.listEvents( + client.fineTuning.jobs.listEvents( 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', { after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }, diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 33d633a63..43e67b030 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -3,14 +3,14 @@ import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', }); describe('resource images', () => { test('createVariation: only required params', async () => { - const responsePromise = openai.images.createVariation({ + const responsePromise = client.images.createVariation({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), }); const rawResponse = await responsePromise.asResponse(); @@ -23,7 +23,7 @@ describe('resource images', () => { }); test('createVariation: required and optional params', async () => { - const response = await openai.images.createVariation({ + const response = await client.images.createVariation({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'dall-e-2', n: 1, @@ -34,7 +34,7 @@ describe('resource images', () => { }); test('edit: only required params', async () => { - const responsePromise = openai.images.edit({ + const responsePromise = client.images.edit({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), prompt: 'A cute baby sea otter wearing a beret', }); @@ -48,7 +48,7 @@ describe('resource images', () => { }); test('edit: required and optional params', async () => { - const response = await openai.images.edit({ + const response = await client.images.edit({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), prompt: 'A cute baby sea otter wearing a beret', mask: await toFile(Buffer.from('# my file contents'), 'README.md'), @@ -61,7 +61,7 @@ describe('resource images', () => { }); test('generate: only required params', async () => { - const responsePromise = openai.images.generate({ prompt: 'A cute baby sea otter' }); + const responsePromise = client.images.generate({ prompt: 'A cute baby sea otter' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -72,7 +72,7 @@ describe('resource images', () => { }); test('generate: required and optional params', async () => { - const response = 
await openai.images.generate({ + const response = await client.images.generate({ prompt: 'A cute baby sea otter', model: 'dall-e-3', n: 1, diff --git a/tests/api-resources/models.test.ts b/tests/api-resources/models.test.ts index ca1f98365..eee91d020 100644 --- a/tests/api-resources/models.test.ts +++ b/tests/api-resources/models.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource models', () => { test('retrieve', async () => { - const responsePromise = openai.models.retrieve('gpt-3.5-turbo'); + const responsePromise = client.models.retrieve('gpt-3.5-turbo'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,12 +23,12 @@ describe('resource models', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.models.retrieve('gpt-3.5-turbo', { path: '/_stainless_unknown_path' }), + client.models.retrieve('gpt-3.5-turbo', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list', async () => { - const responsePromise = openai.models.list(); + const responsePromise = client.models.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -40,13 +40,13 @@ describe('resource models', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(openai.models.list({ path: 
'/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.models.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); test('del', async () => { - const responsePromise = openai.models.del('ft:gpt-3.5-turbo:acemeco:suffix:abc123'); + const responsePromise = client.models.del('ft:gpt-3.5-turbo:acemeco:suffix:abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -59,7 +59,7 @@ describe('resource models', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.models.del('ft:gpt-3.5-turbo:acemeco:suffix:abc123', { path: '/_stainless_unknown_path' }), + client.models.del('ft:gpt-3.5-turbo:acemeco:suffix:abc123', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts index ef7298fa9..0df1f0371 100644 --- a/tests/api-resources/moderations.test.ts +++ b/tests/api-resources/moderations.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource moderations', () => { test('create: only required params', async () => { - const responsePromise = openai.moderations.create({ input: 'I want to kill them.' }); + const responsePromise = client.moderations.create({ input: 'I want to kill them.' 
}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource moderations', () => { }); test('create: required and optional params', async () => { - const response = await openai.moderations.create({ + const response = await client.moderations.create({ input: 'I want to kill them.', model: 'text-moderation-stable', }); diff --git a/tests/api-resources/uploads/parts.test.ts b/tests/api-resources/uploads/parts.test.ts index 5e69c5861..e584bab8e 100644 --- a/tests/api-resources/uploads/parts.test.ts +++ b/tests/api-resources/uploads/parts.test.ts @@ -3,14 +3,14 @@ import OpenAI, { toFile } from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource parts', () => { test('create: only required params', async () => { - const responsePromise = openai.uploads.parts.create('upload_abc123', { + const responsePromise = client.uploads.parts.create('upload_abc123', { data: await toFile(Buffer.from('# my file contents'), 'README.md'), }); const rawResponse = await responsePromise.asResponse(); @@ -23,7 +23,7 @@ describe('resource parts', () => { }); test('create: required and optional params', async () => { - const response = await openai.uploads.parts.create('upload_abc123', { + const response = await client.uploads.parts.create('upload_abc123', { data: await toFile(Buffer.from('# my file contents'), 'README.md'), }); }); diff --git a/tests/api-resources/uploads/uploads.test.ts b/tests/api-resources/uploads/uploads.test.ts index 08f059d1b..e4e3c6d30 100644 --- a/tests/api-resources/uploads/uploads.test.ts +++ b/tests/api-resources/uploads/uploads.test.ts @@ -3,14 +3,14 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; -const openai = new OpenAI({ +const client = 
new OpenAI({ apiKey: 'My API Key', baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); describe('resource uploads', () => { test('create: only required params', async () => { - const responsePromise = openai.uploads.create({ + const responsePromise = client.uploads.create({ bytes: 0, filename: 'filename', mime_type: 'mime_type', @@ -26,7 +26,7 @@ describe('resource uploads', () => { }); test('create: required and optional params', async () => { - const response = await openai.uploads.create({ + const response = await client.uploads.create({ bytes: 0, filename: 'filename', mime_type: 'mime_type', @@ -35,7 +35,7 @@ describe('resource uploads', () => { }); test('cancel', async () => { - const responsePromise = openai.uploads.cancel('upload_abc123'); + const responsePromise = client.uploads.cancel('upload_abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -48,12 +48,12 @@ describe('resource uploads', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - openai.uploads.cancel('upload_abc123', { path: '/_stainless_unknown_path' }), + client.uploads.cancel('upload_abc123', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('complete: only required params', async () => { - const responsePromise = openai.uploads.complete('upload_abc123', { + const responsePromise = client.uploads.complete('upload_abc123', { part_ids: ['string', 'string', 'string'], }); const rawResponse = await responsePromise.asResponse(); @@ -66,7 +66,7 @@ describe('resource uploads', () => { }); test('complete: required and optional params', async () => { - const response = await openai.uploads.complete('upload_abc123', { + const response = await 
client.uploads.complete('upload_abc123', { part_ids: ['string', 'string', 'string'], md5: 'md5', }); From e258161d5a3bbceb8a62f4bc019e4571fda53e90 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 26 Jul 2024 12:01:28 +0000 Subject: [PATCH 172/533] release: 4.53.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 89bf40edf..a1a6f4a3a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.53.1" + ".": "4.53.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c57f8b1e0..f0569806c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.53.2 (2024-07-26) + +Full Changelog: [v4.53.1...v4.53.2](https://github.com/openai/openai-node/compare/v4.53.1...v4.53.2) + +### Chores + +* **docs:** fix incorrect client var names ([#955](https://github.com/openai/openai-node/issues/955)) ([cc91be8](https://github.com/openai/openai-node/commit/cc91be867bf7042abb2ee6c6d5ef69082ac64280)) + ## 4.53.1 (2024-07-25) Full Changelog: [v4.53.0...v4.53.1](https://github.com/openai/openai-node/compare/v4.53.0...v4.53.1) diff --git a/README.md b/README.md index 88c132a4f..5fed94642 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.53.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.53.2/mod.ts'; ``` diff --git a/package.json b/package.json index 590d497c5..5eaac9d39 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.53.1", + "version": "4.53.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": 
"dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 1952d4282..9ab99752c 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.53.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.53.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 7fc8bb1d7..3bfe73d03 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.53.1'; // x-release-please-version +export const VERSION = '4.53.2'; // x-release-please-version From 62f5e22f87a95953aeb39f846e97fea8afb768af Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2024 19:47:25 +0000 Subject: [PATCH 173/533] chore(internal): add constant for default timeout (#960) --- src/index.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/index.ts b/src/index.ts index 7e5df0505..ad8749ebb 100644 --- a/src/index.ts +++ b/src/index.ts @@ -184,6 +184,7 @@ export class OpenAI extends Core.APIClient { } static OpenAI = this; + static DEFAULT_TIMEOUT = 600000; // 10 minutes static OpenAIError = Errors.OpenAIError; static APIError = Errors.APIError; From b0c2e26f8d979a3cffae1466e2f6613aeee1767a Mon Sep 17 00:00:00 2001 From: Guspan Tanadi <36249910+guspan-tanadi@users.noreply.github.com> Date: Tue, 30 Jul 2024 20:28:01 +0700 Subject: [PATCH 174/533] docs(README): link Lifecycle in Polling Helpers section (#962) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5fed94642..c498880e0 100644 --- a/README.md +++ b/README.md @@ -115,7 +115,7 @@ const run = await openai.beta.threads.runs.createAndPoll(thread.id, { }); ``` -More information on the lifecycle of a Run can be found in the [Run Lifecycle 
Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle) +More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/deep-dive/run-lifecycle) ### Bulk Upload Helpers From 5acd6f214e0dedb4229d875daff106e853d4a263 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 30 Jul 2024 15:42:13 +0100 Subject: [PATCH 175/533] chore(internal): cleanup event stream helpers (#950) * [wip]: refactor * a solution * Bind this * fix formatting --------- Co-authored-by: Young-Jin Park --- src/lib/AbstractChatCompletionRunner.ts | 255 ++---------------- src/lib/AssistantStream.ts | 58 ++-- src/lib/ChatCompletionRunner.ts | 2 +- src/lib/ChatCompletionStream.ts | 4 +- ...ssistantStreamRunner.ts => EventStream.ts} | 175 +++--------- 5 files changed, 99 insertions(+), 395 deletions(-) rename src/lib/{AbstractAssistantStreamRunner.ts => EventStream.ts} (55%) diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index 5764b85b2..590013aa6 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -8,7 +8,7 @@ import { type ChatCompletionCreateParams, type ChatCompletionTool, } from 'openai/resources/chat/completions'; -import { APIUserAbortError, OpenAIError } from 'openai/error'; +import { OpenAIError } from 'openai/error'; import { type RunnableFunction, isRunnableFunctionWithParse, @@ -20,6 +20,7 @@ import { ChatCompletionStreamingToolRunnerParams, } from './ChatCompletionStreamingRunner'; import { isAssistantMessage, isFunctionMessage, isToolMessage } from './chatCompletionUtils'; +import { BaseEvents, EventStream } from './EventStream'; const DEFAULT_MAX_CHAT_COMPLETIONS = 10; export interface RunnerOptions extends Core.RequestOptions { @@ -27,60 +28,16 @@ export interface RunnerOptions extends Core.RequestOptions { maxChatCompletions?: number; } -export abstract class 
AbstractChatCompletionRunner< - Events extends CustomEvents = AbstractChatCompletionRunnerEvents, -> { - controller: AbortController = new AbortController(); - - #connectedPromise: Promise; - #resolveConnectedPromise: () => void = () => {}; - #rejectConnectedPromise: (error: OpenAIError) => void = () => {}; - - #endPromise: Promise; - #resolveEndPromise: () => void = () => {}; - #rejectEndPromise: (error: OpenAIError) => void = () => {}; - - #listeners: { [Event in keyof Events]?: ListenersForEvent } = {}; - +export class AbstractChatCompletionRunner< + EventTypes extends AbstractChatCompletionRunnerEvents, +> extends EventStream { protected _chatCompletions: ChatCompletion[] = []; messages: ChatCompletionMessageParam[] = []; - #ended = false; - #errored = false; - #aborted = false; - #catchingPromiseCreated = false; - - constructor() { - this.#connectedPromise = new Promise((resolve, reject) => { - this.#resolveConnectedPromise = resolve; - this.#rejectConnectedPromise = reject; - }); - - this.#endPromise = new Promise((resolve, reject) => { - this.#resolveEndPromise = resolve; - this.#rejectEndPromise = reject; - }); - - // Don't let these promises cause unhandled rejection errors. - // we will manually cause an unhandled rejection error later - // if the user hasn't registered any error listener or called - // any promise-returning method. - this.#connectedPromise.catch(() => {}); - this.#endPromise.catch(() => {}); - } - - protected _run(executor: () => Promise) { - // Unfortunately if we call `executor()` immediately we get runtime errors about - // references to `this` before the `super()` constructor call returns. 
- setTimeout(() => { - executor().then(() => { - this._emitFinal(); - this._emit('end'); - }, this.#handleError); - }, 0); - } - - protected _addChatCompletion(chatCompletion: ChatCompletion): ChatCompletion { + protected _addChatCompletion( + this: AbstractChatCompletionRunner, + chatCompletion: ChatCompletion, + ): ChatCompletion { this._chatCompletions.push(chatCompletion); this._emit('chatCompletion', chatCompletion); const message = chatCompletion.choices[0]?.message; @@ -88,7 +45,11 @@ export abstract class AbstractChatCompletionRunner< return chatCompletion; } - protected _addMessage(message: ChatCompletionMessageParam, emit = true) { + protected _addMessage( + this: AbstractChatCompletionRunner, + message: ChatCompletionMessageParam, + emit = true, + ) { if (!('content' in message)) message.content = null; this.messages.push(message); @@ -110,99 +71,6 @@ export abstract class AbstractChatCompletionRunner< } } - protected _connected() { - if (this.ended) return; - this.#resolveConnectedPromise(); - this._emit('connect'); - } - - get ended(): boolean { - return this.#ended; - } - - get errored(): boolean { - return this.#errored; - } - - get aborted(): boolean { - return this.#aborted; - } - - abort() { - this.controller.abort(); - } - - /** - * Adds the listener function to the end of the listeners array for the event. - * No checks are made to see if the listener has already been added. Multiple calls passing - * the same combination of event and listener will result in the listener being added, and - * called, multiple times. - * @returns this ChatCompletionStream, so that calls can be chained - */ - on(event: Event, listener: ListenerForEvent): this { - const listeners: ListenersForEvent = - this.#listeners[event] || (this.#listeners[event] = []); - listeners.push({ listener }); - return this; - } - - /** - * Removes the specified listener from the listener array for the event. 
- * off() will remove, at most, one instance of a listener from the listener array. If any single - * listener has been added multiple times to the listener array for the specified event, then - * off() must be called multiple times to remove each instance. - * @returns this ChatCompletionStream, so that calls can be chained - */ - off(event: Event, listener: ListenerForEvent): this { - const listeners = this.#listeners[event]; - if (!listeners) return this; - const index = listeners.findIndex((l) => l.listener === listener); - if (index >= 0) listeners.splice(index, 1); - return this; - } - - /** - * Adds a one-time listener function for the event. The next time the event is triggered, - * this listener is removed and then invoked. - * @returns this ChatCompletionStream, so that calls can be chained - */ - once(event: Event, listener: ListenerForEvent): this { - const listeners: ListenersForEvent = - this.#listeners[event] || (this.#listeners[event] = []); - listeners.push({ listener, once: true }); - return this; - } - - /** - * This is similar to `.once()`, but returns a Promise that resolves the next time - * the event is triggered, instead of calling a listener callback. - * @returns a Promise that resolves the next time given event is triggered, - * or rejects if an error is emitted. (If you request the 'error' event, - * returns a promise that resolves with the error). - * - * Example: - * - * const message = await stream.emitted('message') // rejects if the stream errors - */ - emitted( - event: Event, - ): Promise< - EventParameters extends [infer Param] ? Param - : EventParameters extends [] ? 
void - : EventParameters - > { - return new Promise((resolve, reject) => { - this.#catchingPromiseCreated = true; - if (event !== 'error') this.once('error', reject); - this.once(event, resolve as any); - }); - } - - async done(): Promise { - this.#catchingPromiseCreated = true; - await this.#endPromise; - } - /** * @returns a promise that resolves with the final ChatCompletion, or rejects * if an error occurred or the stream ended prematurely without producing a ChatCompletion. @@ -327,75 +195,7 @@ export abstract class AbstractChatCompletionRunner< return [...this._chatCompletions]; } - #handleError = (error: unknown) => { - this.#errored = true; - if (error instanceof Error && error.name === 'AbortError') { - error = new APIUserAbortError(); - } - if (error instanceof APIUserAbortError) { - this.#aborted = true; - return this._emit('abort', error); - } - if (error instanceof OpenAIError) { - return this._emit('error', error); - } - if (error instanceof Error) { - const openAIError: OpenAIError = new OpenAIError(error.message); - // @ts-ignore - openAIError.cause = error; - return this._emit('error', openAIError); - } - return this._emit('error', new OpenAIError(String(error))); - }; - - protected _emit(event: Event, ...args: EventParameters) { - // make sure we don't emit any events after end - if (this.#ended) { - return; - } - - if (event === 'end') { - this.#ended = true; - this.#resolveEndPromise(); - } - - const listeners: ListenersForEvent | undefined = this.#listeners[event]; - if (listeners) { - this.#listeners[event] = listeners.filter((l) => !l.once) as any; - listeners.forEach(({ listener }: any) => listener(...args)); - } - - if (event === 'abort') { - const error = args[0] as APIUserAbortError; - if (!this.#catchingPromiseCreated && !listeners?.length) { - Promise.reject(error); - } - this.#rejectConnectedPromise(error); - this.#rejectEndPromise(error); - this._emit('end'); - return; - } - - if (event === 'error') { - // NOTE: _emit('error', error) 
should only be called from #handleError(). - - const error = args[0] as OpenAIError; - if (!this.#catchingPromiseCreated && !listeners?.length) { - // Trigger an unhandled rejection if the user hasn't registered any error handlers. - // If you are seeing stack traces here, make sure to handle errors via either: - // - runner.on('error', () => ...) - // - await runner.done() - // - await runner.finalChatCompletion() - // - etc. - Promise.reject(error); - } - this.#rejectConnectedPromise(error); - this.#rejectEndPromise(error); - this._emit('end'); - } - } - - protected _emitFinal() { + protected override _emitFinal(this: AbstractChatCompletionRunner) { const completion = this._chatCompletions[this._chatCompletions.length - 1]; if (completion) this._emit('finalChatCompletion', completion); const finalMessage = this.#getFinalMessage(); @@ -650,27 +450,7 @@ export abstract class AbstractChatCompletionRunner< } } -type CustomEvents = { - [k in Event]: k extends keyof AbstractChatCompletionRunnerEvents ? AbstractChatCompletionRunnerEvents[k] - : (...args: any[]) => void; -}; - -type ListenerForEvent, Event extends keyof Events> = Event extends ( - keyof AbstractChatCompletionRunnerEvents -) ? 
- AbstractChatCompletionRunnerEvents[Event] -: Events[Event]; - -type ListenersForEvent, Event extends keyof Events> = Array<{ - listener: ListenerForEvent; - once?: boolean; -}>; -type EventParameters, Event extends keyof Events> = Parameters< - ListenerForEvent ->; - -export interface AbstractChatCompletionRunnerEvents { - connect: () => void; +export interface AbstractChatCompletionRunnerEvents extends BaseEvents { functionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void; message: (message: ChatCompletionMessageParam) => void; chatCompletion: (completion: ChatCompletion) => void; @@ -680,8 +460,5 @@ export interface AbstractChatCompletionRunnerEvents { finalFunctionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void; functionCallResult: (content: string) => void; finalFunctionCallResult: (content: string) => void; - error: (error: OpenAIError) => void; - abort: (error: APIUserAbortError) => void; - end: () => void; totalUsage: (usage: CompletionUsage) => void; } diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index de7511b5d..0f88530b3 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -19,10 +19,6 @@ import { RunSubmitToolOutputsParamsBase, RunSubmitToolOutputsParamsStreaming, } from 'openai/resources/beta/threads/runs/runs'; -import { - AbstractAssistantRunnerEvents, - AbstractAssistantStreamRunner, -} from './AbstractAssistantStreamRunner'; import { type ReadableStream } from 'openai/_shims/index'; import { Stream } from 'openai/streaming'; import { APIUserAbortError, OpenAIError } from 'openai/error'; @@ -34,9 +30,12 @@ import { } from 'openai/resources/beta/assistants'; import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps'; import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; +import { BaseEvents, EventStream } from './EventStream'; import MessageDelta = Messages.MessageDelta; -export 
interface AssistantStreamEvents extends AbstractAssistantRunnerEvents { +export interface AssistantStreamEvents extends BaseEvents { + run: (run: Run) => void; + //New event structure messageCreated: (message: Message) => void; messageDelta: (message: MessageDelta, snapshot: Message) => void; @@ -57,8 +56,6 @@ export interface AssistantStreamEvents extends AbstractAssistantRunnerEvents { //No created or delta as this is not streamed imageFileDone: (content: ImageFile, snapshot: Message) => void; - end: () => void; - event: (event: AssistantStreamEvent) => void; } @@ -75,7 +72,7 @@ export type RunSubmitToolOutputsParamsStream = Omit + extends EventStream implements AsyncIterable { //Track all events in a single list for reference @@ -207,7 +204,7 @@ export class AssistantStream return runner; } - protected override async _createToolAssistantStream( + protected async _createToolAssistantStream( run: Runs, threadId: string, runId: string, @@ -304,7 +301,7 @@ export class AssistantStream return this.#finalRun; } - protected override async _createThreadAssistantStream( + protected async _createThreadAssistantStream( thread: Threads, params: ThreadCreateAndRunParamsBase, options?: Core.RequestOptions, @@ -330,7 +327,7 @@ export class AssistantStream return this._addRun(this.#endRequest()); } - protected override async _createAssistantStream( + protected async _createAssistantStream( run: Runs, threadId: string, params: RunCreateParamsBase, @@ -417,7 +414,7 @@ export class AssistantStream return this.#finalRun; } - #handleMessage(event: MessageStreamEvent) { + #handleMessage(this: AssistantStream, event: MessageStreamEvent) { const [accumulatedMessage, newContent] = this.#accumulateMessage(event, this.#messageSnapshot); this.#messageSnapshot = accumulatedMessage; this.#messageSnapshots[accumulatedMessage.id] = accumulatedMessage; @@ -500,7 +497,7 @@ export class AssistantStream } } - #handleRunStep(event: RunStepStreamEvent) { + #handleRunStep(this: AssistantStream, 
event: RunStepStreamEvent) { const accumulatedRunStep = this.#accumulateRunStep(event); this.#currentRunStepSnapshot = accumulatedRunStep; @@ -556,7 +553,7 @@ export class AssistantStream } } - #handleEvent(event: AssistantStreamEvent) { + #handleEvent(this: AssistantStream, event: AssistantStreamEvent) { this.#events.push(event); this._emit('event', event); } @@ -696,7 +693,7 @@ export class AssistantStream return acc; } - #handleRun(event: RunStreamEvent) { + #handleRun(this: AssistantStream, event: RunStreamEvent) { this.#currentRunSnapshot = event.data; switch (event.event) { case 'thread.run.created': @@ -720,4 +717,35 @@ export class AssistantStream break; } } + + protected _addRun(run: Run): Run { + return run; + } + + protected async _threadAssistantStream( + body: ThreadCreateAndRunParamsBase, + thread: Threads, + options?: Core.RequestOptions, + ): Promise { + return await this._createThreadAssistantStream(thread, body, options); + } + + protected async _runAssistantStream( + threadId: string, + runs: Runs, + params: RunCreateParamsBase, + options?: Core.RequestOptions, + ): Promise { + return await this._createAssistantStream(runs, threadId, params, options); + } + + protected async _runToolAssistantStream( + threadId: string, + runId: string, + runs: Runs, + params: RunSubmitToolOutputsParamsStream, + options?: Core.RequestOptions, + ): Promise { + return await this._createToolAssistantStream(runs, threadId, runId, params, options); + } } diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts index a110f0192..c756919b0 100644 --- a/src/lib/ChatCompletionRunner.ts +++ b/src/lib/ChatCompletionRunner.ts @@ -59,7 +59,7 @@ export class ChatCompletionRunner extends AbstractChatCompletionRunner { + [Symbol.asyncIterator](this: ChatCompletionStream): AsyncIterator { const pushQueue: ChatCompletionChunk[] = []; const readQueue: { resolve: (chunk: ChatCompletionChunk | undefined) => void; diff --git 
a/src/lib/AbstractAssistantStreamRunner.ts b/src/lib/EventStream.ts similarity index 55% rename from src/lib/AbstractAssistantStreamRunner.ts rename to src/lib/EventStream.ts index b600f0df3..a18c771dd 100644 --- a/src/lib/AbstractAssistantStreamRunner.ts +++ b/src/lib/EventStream.ts @@ -1,12 +1,6 @@ -import * as Core from 'openai/core'; import { APIUserAbortError, OpenAIError } from 'openai/error'; -import { Run, RunSubmitToolOutputsParamsBase } from 'openai/resources/beta/threads/runs/runs'; -import { RunCreateParamsBase, Runs } from 'openai/resources/beta/threads/runs/runs'; -import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; -export abstract class AbstractAssistantStreamRunner< - Events extends CustomEvents = AbstractAssistantRunnerEvents, -> { +export class EventStream { controller: AbortController = new AbortController(); #connectedPromise: Promise; @@ -17,7 +11,9 @@ export abstract class AbstractAssistantStreamRunner< #resolveEndPromise: () => void = () => {}; #rejectEndPromise: (error: OpenAIError) => void = () => {}; - #listeners: { [Event in keyof Events]?: ListenersForEvent } = {}; + #listeners: { + [Event in keyof EventTypes]?: EventListeners; + } = {}; #ended = false; #errored = false; @@ -43,22 +39,18 @@ export abstract class AbstractAssistantStreamRunner< this.#endPromise.catch(() => {}); } - protected _run(executor: () => Promise) { + protected _run(this: EventStream, executor: () => Promise) { // Unfortunately if we call `executor()` immediately we get runtime errors about // references to `this` before the `super()` constructor call returns. 
setTimeout(() => { executor().then(() => { - // this._emitFinal(); + this._emitFinal(); this._emit('end'); - }, this.#handleError); + }, this.#handleError.bind(this)); }, 0); } - protected _addRun(run: Run): Run { - return run; - } - - protected _connected() { + protected _connected(this: EventStream) { if (this.ended) return; this.#resolveConnectedPromise(); this._emit('connect'); @@ -87,8 +79,8 @@ export abstract class AbstractAssistantStreamRunner< * called, multiple times. * @returns this ChatCompletionStream, so that calls can be chained */ - on(event: Event, listener: ListenerForEvent): this { - const listeners: ListenersForEvent = + on(event: Event, listener: EventListener): this { + const listeners: EventListeners = this.#listeners[event] || (this.#listeners[event] = []); listeners.push({ listener }); return this; @@ -101,7 +93,7 @@ export abstract class AbstractAssistantStreamRunner< * off() must be called multiple times to remove each instance. * @returns this ChatCompletionStream, so that calls can be chained */ - off(event: Event, listener: ListenerForEvent): this { + off(event: Event, listener: EventListener): this { const listeners = this.#listeners[event]; if (!listeners) return this; const index = listeners.findIndex((l) => l.listener === listener); @@ -114,8 +106,8 @@ export abstract class AbstractAssistantStreamRunner< * this listener is removed and then invoked. 
* @returns this ChatCompletionStream, so that calls can be chained */ - once(event: Event, listener: ListenerForEvent): this { - const listeners: ListenersForEvent = + once(event: Event, listener: EventListener): this { + const listeners: EventListeners = this.#listeners[event] || (this.#listeners[event] = []); listeners.push({ listener, once: true }); return this; @@ -132,12 +124,12 @@ export abstract class AbstractAssistantStreamRunner< * * const message = await stream.emitted('message') // rejects if the stream errors */ - emitted( + emitted( event: Event, ): Promise< - EventParameters extends [infer Param] ? Param - : EventParameters extends [] ? void - : EventParameters + EventParameters extends [infer Param] ? Param + : EventParameters extends [] ? void + : EventParameters > { return new Promise((resolve, reject) => { this.#catchingPromiseCreated = true; @@ -151,7 +143,7 @@ export abstract class AbstractAssistantStreamRunner< await this.#endPromise; } - #handleError = (error: unknown) => { + #handleError(this: EventStream, error: unknown) { this.#errored = true; if (error instanceof Error && error.name === 'AbortError') { error = new APIUserAbortError(); @@ -170,9 +162,15 @@ export abstract class AbstractAssistantStreamRunner< return this._emit('error', openAIError); } return this._emit('error', new OpenAIError(String(error))); - }; + } - protected _emit(event: Event, ...args: EventParameters) { + _emit(event: Event, ...args: EventParameters): void; + _emit(event: Event, ...args: EventParameters): void; + _emit( + this: EventStream, + event: Event, + ...args: EventParameters + ) { // make sure we don't emit any events after end if (this.#ended) { return; @@ -183,10 +181,10 @@ export abstract class AbstractAssistantStreamRunner< this.#resolveEndPromise(); } - const listeners: ListenersForEvent | undefined = this.#listeners[event]; + const listeners: EventListeners | undefined = this.#listeners[event]; if (listeners) { this.#listeners[event] = 
listeners.filter((l) => !l.once) as any; - listeners.forEach(({ listener }: any) => listener(...args)); + listeners.forEach(({ listener }: any) => listener(...(args as any))); } if (event === 'abort') { @@ -219,121 +217,22 @@ export abstract class AbstractAssistantStreamRunner< } } - protected async _threadAssistantStream( - body: ThreadCreateAndRunParamsBase, - thread: Threads, - options?: Core.RequestOptions, - ): Promise { - return await this._createThreadAssistantStream(thread, body, options); - } - - protected async _runAssistantStream( - threadId: string, - runs: Runs, - params: RunCreateParamsBase, - options?: Core.RequestOptions, - ): Promise { - return await this._createAssistantStream(runs, threadId, params, options); - } - - protected async _runToolAssistantStream( - threadId: string, - runId: string, - runs: Runs, - params: RunSubmitToolOutputsParamsBase, - options?: Core.RequestOptions, - ): Promise { - return await this._createToolAssistantStream(runs, threadId, runId, params, options); - } - - protected async _createThreadAssistantStream( - thread: Threads, - body: ThreadCreateAndRunParamsBase, - options?: Core.RequestOptions, - ): Promise { - const signal = options?.signal; - if (signal) { - if (signal.aborted) this.controller.abort(); - signal.addEventListener('abort', () => this.controller.abort()); - } - // this.#validateParams(params); - - const runResult = await thread.createAndRun( - { ...body, stream: false }, - { ...options, signal: this.controller.signal }, - ); - this._connected(); - return this._addRun(runResult as Run); - } - - protected async _createToolAssistantStream( - run: Runs, - threadId: string, - runId: string, - params: RunSubmitToolOutputsParamsBase, - options?: Core.RequestOptions, - ): Promise { - const signal = options?.signal; - if (signal) { - if (signal.aborted) this.controller.abort(); - signal.addEventListener('abort', () => this.controller.abort()); - } - - const runResult = await run.submitToolOutputs( - threadId, - 
runId, - { ...params, stream: false }, - { ...options, signal: this.controller.signal }, - ); - this._connected(); - return this._addRun(runResult as Run); - } - - protected async _createAssistantStream( - run: Runs, - threadId: string, - params: RunCreateParamsBase, - options?: Core.RequestOptions, - ): Promise { - const signal = options?.signal; - if (signal) { - if (signal.aborted) this.controller.abort(); - signal.addEventListener('abort', () => this.controller.abort()); - } - // this.#validateParams(params); - - const runResult = await run.create( - threadId, - { ...params, stream: false }, - { ...options, signal: this.controller.signal }, - ); - this._connected(); - return this._addRun(runResult as Run); - } + protected _emitFinal(): void {} } -type CustomEvents = { - [k in Event]: k extends keyof AbstractAssistantRunnerEvents ? AbstractAssistantRunnerEvents[k] - : (...args: any[]) => void; -}; +type EventListener = Events[EventType]; -type ListenerForEvent, Event extends keyof Events> = Event extends ( - keyof AbstractAssistantRunnerEvents -) ? - AbstractAssistantRunnerEvents[Event] -: Events[Event]; - -type ListenersForEvent, Event extends keyof Events> = Array<{ - listener: ListenerForEvent; +type EventListeners = Array<{ + listener: EventListener; once?: boolean; }>; -type EventParameters, Event extends keyof Events> = Parameters< - ListenerForEvent ->; -export interface AbstractAssistantRunnerEvents { +export type EventParameters = { + [Event in EventType]: EventListener extends (...args: infer P) => any ? 
P : never; +}[EventType]; + +export interface BaseEvents { connect: () => void; - run: (run: Run) => void; error: (error: OpenAIError) => void; abort: (error: APIUserAbortError) => void; end: () => void; From 9920e1d14ce8a8115fd07d716e7459151b6c4ddd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 16:31:55 +0000 Subject: [PATCH 176/533] chore(ci): correctly tag pre-release npm packages (#963) --- bin/publish-npm | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/bin/publish-npm b/bin/publish-npm index 4d6c9f357..4c21181bb 100644 --- a/bin/publish-npm +++ b/bin/publish-npm @@ -2,8 +2,24 @@ set -eux -npm config set //registry.npmjs.org/:_authToken $NPM_TOKEN +npm config set '//registry.npmjs.org/:_authToken' "$NPM_TOKEN" +# Build the project yarn build + +# Navigate to the dist directory cd dist -yarn publish --access public + +# Get the version from package.json +VERSION="$(node -p "require('./package.json').version")" + +# Extract the pre-release tag if it exists +if [[ "$VERSION" =~ -([a-zA-Z]+) ]]; then + # Extract the part before any dot in the pre-release identifier + TAG="${BASH_REMATCH[1]}" +else + TAG="latest" +fi + +# Publish with the appropriate tag +yarn publish --access public --tag "$TAG" From 7f9704e1811439850424f72d15cd44671a32f205 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 02:47:09 +0000 Subject: [PATCH 177/533] feat: extract out `ImageModel`, `AudioModel`, `SpeechModel` (#964) --- api.md | 10 +++++++++ src/index.ts | 3 +++ src/resources/audio/audio.ts | 5 +++++ src/resources/audio/index.ts | 4 ++-- src/resources/audio/speech.ts | 5 ++++- src/resources/audio/transcriptions.ts | 3 ++- src/resources/audio/translations.ts | 3 ++- src/resources/beta/assistants.ts | 26 ++---------------------- src/resources/beta/threads/runs/runs.ts | 27 
++----------------------- src/resources/beta/threads/threads.ts | 27 ++----------------------- src/resources/images.ts | 9 ++++++--- src/resources/index.ts | 11 ++++++++-- src/resources/moderations.ts | 5 ++++- 13 files changed, 53 insertions(+), 85 deletions(-) diff --git a/api.md b/api.md index ddc9fce38..acae0b8e8 100644 --- a/api.md +++ b/api.md @@ -88,6 +88,7 @@ Methods: Types: - Image +- ImageModel - ImagesResponse Methods: @@ -98,6 +99,10 @@ Methods: # Audio +Types: + +- AudioModel + ## Transcriptions Types: @@ -120,6 +125,10 @@ Methods: ## Speech +Types: + +- SpeechModel + Methods: - client.audio.speech.create({ ...params }) -> Response @@ -129,6 +138,7 @@ Methods: Types: - Moderation +- ModerationModel - ModerationCreateResponse Methods: diff --git a/src/index.ts b/src/index.ts index ad8749ebb..cd0dd67b3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -282,15 +282,18 @@ export namespace OpenAI { export import Images = API.Images; export import Image = API.Image; + export import ImageModel = API.ImageModel; export import ImagesResponse = API.ImagesResponse; export import ImageCreateVariationParams = API.ImageCreateVariationParams; export import ImageEditParams = API.ImageEditParams; export import ImageGenerateParams = API.ImageGenerateParams; export import Audio = API.Audio; + export import AudioModel = API.AudioModel; export import Moderations = API.Moderations; export import Moderation = API.Moderation; + export import ModerationModel = API.ModerationModel; export import ModerationCreateResponse = API.ModerationCreateResponse; export import ModerationCreateParams = API.ModerationCreateParams; diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index a89bf0102..1f0269d03 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -1,6 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; +import * as AudioAPI from './audio'; import * as SpeechAPI from './speech'; import * as TranscriptionsAPI from './transcriptions'; import * as TranslationsAPI from './translations'; @@ -11,7 +12,10 @@ export class Audio extends APIResource { speech: SpeechAPI.Speech = new SpeechAPI.Speech(this._client); } +export type AudioModel = 'whisper-1'; + export namespace Audio { + export import AudioModel = AudioAPI.AudioModel; export import Transcriptions = TranscriptionsAPI.Transcriptions; export import Transcription = TranscriptionsAPI.Transcription; export import TranscriptionCreateParams = TranscriptionsAPI.TranscriptionCreateParams; @@ -19,5 +23,6 @@ export namespace Audio { export import Translation = TranslationsAPI.Translation; export import TranslationCreateParams = TranslationsAPI.TranslationCreateParams; export import Speech = SpeechAPI.Speech; + export import SpeechModel = SpeechAPI.SpeechModel; export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; } diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index 31732a267..a7f935964 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { Audio } from './audio'; -export { SpeechCreateParams, Speech } from './speech'; +export { AudioModel, Audio } from './audio'; +export { SpeechModel, SpeechCreateParams, Speech } from './speech'; export { Transcription, TranscriptionCreateParams, Transcriptions } from './transcriptions'; export { Translation, TranslationCreateParams, Translations } from './translations'; diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index d0a6e7f31..34fb26b02 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -14,6 +14,8 @@ export class Speech extends APIResource { } } +export type SpeechModel = 'tts-1' | 'tts-1-hd'; + export interface SpeechCreateParams { /** * The text to generate audio for. The maximum length is 4096 characters. @@ -24,7 +26,7 @@ export interface SpeechCreateParams { * One of the available [TTS models](https://platform.openai.com/docs/models/tts): * `tts-1` or `tts-1-hd` */ - model: (string & {}) | 'tts-1' | 'tts-1-hd'; + model: (string & {}) | SpeechModel; /** * The voice to use when generating the audio. Supported voices are `alloy`, @@ -48,5 +50,6 @@ export interface SpeechCreateParams { } export namespace Speech { + export import SpeechModel = SpeechAPI.SpeechModel; export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; } diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 0eb4e4b7c..5c30d6c59 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -3,6 +3,7 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; import * as TranscriptionsAPI from './transcriptions'; +import * as AudioAPI from './audio'; export class Transcriptions extends APIResource { /** @@ -35,7 +36,7 @@ export interface TranscriptionCreateParams { * ID of the model to use. Only `whisper-1` (which is powered by our open source * Whisper V2 model) is currently available. 
*/ - model: (string & {}) | 'whisper-1'; + model: (string & {}) | AudioAPI.AudioModel; /** * The language of the input audio. Supplying the input language in diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 48fddc2ee..dedc15b65 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -3,6 +3,7 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; import * as TranslationsAPI from './translations'; +import * as AudioAPI from './audio'; export class Translations extends APIResource { /** @@ -28,7 +29,7 @@ export interface TranslationCreateParams { * ID of the model to use. Only `whisper-1` (which is powered by our open source * Whisper V2 model) is currently available. */ - model: (string & {}) | 'whisper-1'; + model: (string & {}) | AudioAPI.AudioModel; /** * An optional text to guide the model's style or continue a previous audio diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index abacfd06e..d66b03768 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -5,6 +5,7 @@ import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as AssistantsAPI from './assistants'; import * as Shared from '../shared'; +import * as ChatAPI from '../chat/chat'; import * as MessagesAPI from './threads/messages'; import * as ThreadsAPI from './threads/threads'; import * as RunsAPI from './threads/runs/runs'; @@ -1053,30 +1054,7 @@ export interface AssistantCreateParams { * [Model overview](https://platform.openai.com/docs/models/overview) for * descriptions of them. 
*/ - model: - | (string & {}) - | 'gpt-4o' - | 'gpt-4o-2024-05-13' - | 'gpt-4o-mini' - | 'gpt-4o-mini-2024-07-18' - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613'; + model: (string & {}) | ChatAPI.ChatModel; /** * The description of the assistant. The maximum length is 512 characters. diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 24b6ce4a2..db9827616 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -9,6 +9,7 @@ import { sleep } from '../../../../core'; import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStream'; import * as RunsAPI from './runs'; import * as AssistantsAPI from '../../assistants'; +import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; import * as ThreadsAPI from '../threads'; import * as StepsAPI from './steps'; @@ -668,31 +669,7 @@ export interface RunCreateParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. 
*/ - model?: - | (string & {}) - | 'gpt-4o' - | 'gpt-4o-2024-05-13' - | 'gpt-4o-mini' - | 'gpt-4o-mini-2024-07-18' - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613' - | null; + model?: (string & {}) | ChatAPI.ChatModel | null; /** * Whether to enable diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 04ce7b57d..0b931a911 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -7,6 +7,7 @@ import { APIPromise } from '../../../core'; import * as Core from '../../../core'; import * as ThreadsAPI from './threads'; import * as AssistantsAPI from '../assistants'; +import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; import * as RunsAPI from './runs/runs'; import { Stream } from '../../../streaming'; @@ -545,31 +546,7 @@ export interface ThreadCreateAndRunParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. 
*/ - model?: - | (string & {}) - | 'gpt-4o' - | 'gpt-4o-2024-05-13' - | 'gpt-4o-mini' - | 'gpt-4o-mini-2024-07-18' - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613' - | null; + model?: (string & {}) | ChatAPI.ChatModel | null; /** * Whether to enable diff --git a/src/resources/images.ts b/src/resources/images.ts index 24af635b2..fdd0b8881 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -52,6 +52,8 @@ export interface Image { url?: string; } +export type ImageModel = 'dall-e-2' | 'dall-e-3'; + export interface ImagesResponse { created: number; @@ -69,7 +71,7 @@ export interface ImageCreateVariationParams { * The model to use for image generation. Only `dall-e-2` is supported at this * time. */ - model?: (string & {}) | 'dall-e-2' | null; + model?: (string & {}) | ImageModel | null; /** * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only @@ -122,7 +124,7 @@ export interface ImageEditParams { * The model to use for image generation. Only `dall-e-2` is supported at this * time. */ - model?: (string & {}) | 'dall-e-2' | null; + model?: (string & {}) | ImageModel | null; /** * The number of images to generate. Must be between 1 and 10. @@ -160,7 +162,7 @@ export interface ImageGenerateParams { /** * The model to use for image generation. */ - model?: (string & {}) | 'dall-e-2' | 'dall-e-3' | null; + model?: (string & {}) | ImageModel | null; /** * The number of images to generate. Must be between 1 and 10. 
For `dall-e-3`, only @@ -207,6 +209,7 @@ export interface ImageGenerateParams { export namespace Images { export import Image = ImagesAPI.Image; + export import ImageModel = ImagesAPI.ImageModel; export import ImagesResponse = ImagesAPI.ImagesResponse; export import ImageCreateVariationParams = ImagesAPI.ImageCreateVariationParams; export import ImageEditParams = ImagesAPI.ImageEditParams; diff --git a/src/resources/index.ts b/src/resources/index.ts index 9f2a3cbe7..8d952e2db 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -2,7 +2,7 @@ export * from './chat/index'; export * from './shared'; -export { Audio } from './audio/audio'; +export { AudioModel, Audio } from './audio/audio'; export { Batch, BatchError, @@ -35,6 +35,7 @@ export { export { FineTuning } from './fine-tuning/fine-tuning'; export { Image, + ImageModel, ImagesResponse, ImageCreateVariationParams, ImageEditParams, @@ -42,5 +43,11 @@ export { Images, } from './images'; export { Model, ModelDeleted, ModelsPage, Models } from './models'; -export { Moderation, ModerationCreateResponse, ModerationCreateParams, Moderations } from './moderations'; +export { + Moderation, + ModerationModel, + ModerationCreateResponse, + ModerationCreateParams, + Moderations, +} from './moderations'; export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads/uploads'; diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index 86fbbc6b2..f80bc7acb 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -168,6 +168,8 @@ export namespace Moderation { } } +export type ModerationModel = 'text-moderation-latest' | 'text-moderation-stable'; + /** * Represents if a given text input is potentially harmful. */ @@ -204,11 +206,12 @@ export interface ModerationCreateParams { * model. Accuracy of `text-moderation-stable` may be slightly lower than for * `text-moderation-latest`. 
*/ - model?: (string & {}) | 'text-moderation-latest' | 'text-moderation-stable'; + model?: (string & {}) | ModerationModel; } export namespace Moderations { export import Moderation = ModerationsAPI.Moderation; + export import ModerationModel = ModerationsAPI.ModerationModel; export import ModerationCreateResponse = ModerationsAPI.ModerationCreateResponse; export import ModerationCreateParams = ModerationsAPI.ModerationCreateParams; } From 823aa3791db7526da47c98e318c0c2933f13228a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 04:04:23 +0000 Subject: [PATCH 178/533] feat: make enums not nominal (#965) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4e4cb5509..6cc775763 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-77cfff37114bc9f141c7e6107eb5f1b38d8cc99bc3d4ce03a066db2b6b649c69.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b04761ffd2adad3cc19a6dc6fc696ac445878219972f891881a967340fa9a6b0.yml From f72b4036c2fef5900f013529365211ac22477a07 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 Aug 2024 04:04:49 +0000 Subject: [PATCH 179/533] release: 4.54.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 21 +++++++++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 26 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a1a6f4a3a..402862bfb 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.53.2" + ".": "4.54.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f0569806c..a285f7d15 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 4.54.0 (2024-08-02) + +Full Changelog: [v4.53.2...v4.54.0](https://github.com/openai/openai-node/compare/v4.53.2...v4.54.0) + +### Features + +* extract out `ImageModel`, `AudioModel`, `SpeechModel` ([#964](https://github.com/openai/openai-node/issues/964)) ([1edf957](https://github.com/openai/openai-node/commit/1edf957e1cb86c2a7b2d29e28f2b8f428ea0cd7d)) +* make enums not nominal ([#965](https://github.com/openai/openai-node/issues/965)) ([0dd0cd1](https://github.com/openai/openai-node/commit/0dd0cd158d6765c3a04ac983aad03c2ecad14502)) + + +### Chores + +* **ci:** correctly tag pre-release npm packages ([#963](https://github.com/openai/openai-node/issues/963)) ([f1a4a68](https://github.com/openai/openai-node/commit/f1a4a686bbf4a38919b8597f008d895d1b99d8df)) +* **internal:** add constant for default timeout ([#960](https://github.com/openai/openai-node/issues/960)) ([55c01f4](https://github.com/openai/openai-node/commit/55c01f4dc5d132c21713f9e8606b95abc76fcd44)) +* **internal:** cleanup event stream helpers ([#950](https://github.com/openai/openai-node/issues/950)) ([8f49956](https://github.com/openai/openai-node/commit/8f499566c47bd7d4799a8cbe0d980553348b8f48)) + + +### Documentation + +* **README:** link Lifecycle in Polling Helpers section ([#962](https://github.com/openai/openai-node/issues/962)) ([c610c81](https://github.com/openai/openai-node/commit/c610c813e8d7f96b5b8315ae194e0a9ff565f43d)) + ## 4.53.2 (2024-07-26) Full Changelog: [v4.53.1...v4.53.2](https://github.com/openai/openai-node/compare/v4.53.1...v4.53.2) diff --git a/README.md b/README.md index c498880e0..ea1d54f5b 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.53.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.54.0/mod.ts'; ``` diff --git a/package.json b/package.json index 5eaac9d39..3f6c722d1 100644 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.53.2", + "version": "4.54.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 9ab99752c..438b11e6f 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.53.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.54.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 3bfe73d03..bca401fd1 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.53.2'; // x-release-please-version +export const VERSION = '4.54.0'; // x-release-please-version From c972cfc5a00fe391d5a9b8f6e394c731b235440a Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 6 Aug 2024 18:11:57 +0100 Subject: [PATCH 180/533] feat(api): add structured outputs support This commit adds support for JSON schema response format & adds a separate `.beta.chat.completions.parse()` method to automatically deserialise the response content into a zod schema with the zodResponseFormat() helper function. 
For more details on structured outputs, see this guide https://platform.openai.com/docs/guides/structured-outputs --- .github/workflows/ci.yml | 1 - .stats.yml | 2 +- api.md | 7 +- examples/parsing-run-tools.ts | 153 ++++++ examples/parsing-stream.ts | 57 +++ examples/parsing-tools-stream.ts | 43 ++ examples/parsing-tools.ts | 67 +++ examples/parsing.ts | 36 ++ examples/stream.ts | 2 +- examples/tool-call-helpers-zod.ts | 2 +- helpers.md | 194 +++++++- jest.config.ts | 1 + package.json | 14 +- scripts/build | 2 +- src/_vendor/partial-json-parser/README.md | 3 + src/_vendor/partial-json-parser/parser.ts | 264 ++++++++++ src/_vendor/zod-to-json-schema/Options.ts | 73 +++ src/_vendor/zod-to-json-schema/Refs.ts | 39 ++ .../zod-to-json-schema/errorMessages.ts | 31 ++ src/_vendor/zod-to-json-schema/index.ts | 37 ++ src/_vendor/zod-to-json-schema/parseDef.ts | 231 +++++++++ src/_vendor/zod-to-json-schema/parsers/any.ts | 5 + .../zod-to-json-schema/parsers/array.ts | 36 ++ .../zod-to-json-schema/parsers/bigint.ts | 60 +++ .../zod-to-json-schema/parsers/boolean.ts | 9 + .../zod-to-json-schema/parsers/branded.ts | 7 + .../zod-to-json-schema/parsers/catch.ts | 7 + .../zod-to-json-schema/parsers/date.ts | 83 ++++ .../zod-to-json-schema/parsers/default.ts | 10 + .../zod-to-json-schema/parsers/effects.ts | 7 + .../zod-to-json-schema/parsers/enum.ts | 13 + .../parsers/intersection.ts | 64 +++ .../zod-to-json-schema/parsers/literal.ts | 37 ++ src/_vendor/zod-to-json-schema/parsers/map.ts | 42 ++ .../zod-to-json-schema/parsers/nativeEnum.ts | 27 + .../zod-to-json-schema/parsers/never.ts | 9 + .../zod-to-json-schema/parsers/null.ts | 16 + .../zod-to-json-schema/parsers/nullable.ts | 49 ++ .../zod-to-json-schema/parsers/number.ts | 62 +++ .../zod-to-json-schema/parsers/object.ts | 63 +++ .../zod-to-json-schema/parsers/optional.ts | 25 + .../zod-to-json-schema/parsers/pipeline.ts | 28 ++ .../zod-to-json-schema/parsers/promise.ts | 7 + .../zod-to-json-schema/parsers/readonly.ts | 7 + 
.../zod-to-json-schema/parsers/record.ts | 73 +++ src/_vendor/zod-to-json-schema/parsers/set.ts | 36 ++ .../zod-to-json-schema/parsers/string.ts | 400 +++++++++++++++ .../zod-to-json-schema/parsers/tuple.ts | 54 ++ .../zod-to-json-schema/parsers/undefined.ts | 9 + .../zod-to-json-schema/parsers/union.ts | 119 +++++ .../zod-to-json-schema/parsers/unknown.ts | 5 + .../zod-to-json-schema/zodToJsonSchema.ts | 91 ++++ src/error.ts | 12 + src/helpers/zod.ts | 102 ++++ src/index.ts | 4 + src/lib/AbstractChatCompletionRunner.ts | 85 +++- src/lib/AssistantStream.ts | 12 +- src/lib/ChatCompletionRunner.ts | 28 +- src/lib/ChatCompletionStream.ts | 470 ++++++++++++++++-- src/lib/ChatCompletionStreamingRunner.ts | 34 +- src/lib/RunnableFunction.ts | 6 +- src/lib/parser.ts | 235 +++++++++ src/resources/beta/assistants.ts | 19 +- src/resources/beta/beta.ts | 1 - src/resources/beta/chat/completions.ts | 104 +++- src/resources/beta/index.ts | 1 - src/resources/beta/threads/index.ts | 3 +- src/resources/beta/threads/messages.ts | 43 +- src/resources/beta/threads/runs/runs.ts | 10 + src/resources/beta/threads/threads.ts | 31 +- src/resources/beta/vector-stores/files.ts | 2 +- src/resources/chat/chat.ts | 2 + src/resources/chat/completions.ts | 74 ++- src/resources/chat/index.ts | 1 + src/resources/fine-tuning/jobs/jobs.ts | 6 +- src/resources/shared.ts | 62 +++ tests/api-resources/beta/assistants.test.ts | 6 +- .../beta/threads/runs/runs.test.ts | 4 +- .../beta/threads/threads.test.ts | 4 +- tests/api-resources/chat/completions.test.ts | 16 +- .../fine-tuning/jobs/jobs.test.ts | 4 +- tests/api-resources/models.test.ts | 12 +- tests/helpers/zod.test.ts | 269 ++++++++++ .../lib/ChatCompletionRunFunctions.test.ts | 308 ++++++++---- tests/lib/ChatCompletionStream.test.ts | 383 ++++++++++++++ .../ChatCompletionStream.test.ts.snap | 99 ++++ tests/lib/__snapshots__/parser.test.ts.snap | 28 ++ tests/lib/parser.test.ts | 47 ++ tests/utils/mock-fetch.ts | 68 +++ tests/utils/mock-snapshots.ts | 
124 +++++ yarn.lock | 10 + 91 files changed, 5148 insertions(+), 300 deletions(-) create mode 100644 examples/parsing-run-tools.ts create mode 100644 examples/parsing-stream.ts create mode 100644 examples/parsing-tools-stream.ts create mode 100644 examples/parsing-tools.ts create mode 100644 examples/parsing.ts create mode 100644 src/_vendor/partial-json-parser/README.md create mode 100644 src/_vendor/partial-json-parser/parser.ts create mode 100644 src/_vendor/zod-to-json-schema/Options.ts create mode 100644 src/_vendor/zod-to-json-schema/Refs.ts create mode 100644 src/_vendor/zod-to-json-schema/errorMessages.ts create mode 100644 src/_vendor/zod-to-json-schema/index.ts create mode 100644 src/_vendor/zod-to-json-schema/parseDef.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/any.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/array.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/bigint.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/boolean.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/branded.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/catch.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/date.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/default.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/effects.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/enum.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/intersection.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/literal.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/map.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/nativeEnum.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/never.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/null.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/nullable.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/number.ts create mode 100644 
src/_vendor/zod-to-json-schema/parsers/object.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/optional.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/pipeline.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/promise.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/readonly.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/record.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/set.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/string.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/tuple.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/undefined.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/union.ts create mode 100644 src/_vendor/zod-to-json-schema/parsers/unknown.ts create mode 100644 src/_vendor/zod-to-json-schema/zodToJsonSchema.ts create mode 100644 src/helpers/zod.ts create mode 100644 src/lib/parser.ts create mode 100644 tests/helpers/zod.test.ts rename {src => tests}/lib/ChatCompletionRunFunctions.test.ts (91%) create mode 100644 tests/lib/ChatCompletionStream.test.ts create mode 100644 tests/lib/__snapshots__/ChatCompletionStream.test.ts.snap create mode 100644 tests/lib/__snapshots__/parser.test.ts.snap create mode 100644 tests/lib/parser.test.ts create mode 100644 tests/utils/mock-fetch.ts create mode 100644 tests/utils/mock-snapshots.ts diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3be379044..68f80399b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,4 +45,3 @@ jobs: - name: Run tests run: ./scripts/test - diff --git a/.stats.yml b/.stats.yml index 6cc775763..da2675831 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b04761ffd2adad3cc19a6dc6fc696ac445878219972f891881a967340fa9a6b0.yml +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c36d30a94622922f83d56a025cdf0095ff7cb18a5138838c698c8443f21fb3a8.yml diff --git a/api.md b/api.md index acae0b8e8..25f08b130 100644 --- a/api.md +++ b/api.md @@ -5,6 +5,9 @@ Types: - ErrorObject - FunctionDefinition - FunctionParameters +- ResponseFormatJSONObject +- ResponseFormatJSONSchema +- ResponseFormatText # Completions @@ -33,6 +36,7 @@ Types: - ChatCompletionChunk - ChatCompletionContentPart - ChatCompletionContentPartImage +- ChatCompletionContentPartRefusal - ChatCompletionContentPartText - ChatCompletionFunctionCallOption - ChatCompletionFunctionMessageParam @@ -277,7 +281,6 @@ Methods: Types: -- AssistantResponseFormat - AssistantResponseFormatOption - AssistantToolChoice - AssistantToolChoiceFunction @@ -370,6 +373,8 @@ Types: - MessageDeleted - MessageDelta - MessageDeltaEvent +- RefusalContentBlock +- RefusalDeltaBlock - Text - TextContentBlock - TextContentBlockParam diff --git a/examples/parsing-run-tools.ts b/examples/parsing-run-tools.ts new file mode 100644 index 000000000..a3c544c3d --- /dev/null +++ b/examples/parsing-run-tools.ts @@ -0,0 +1,153 @@ +import OpenAI from 'openai'; +import z from 'zod'; +import { zodFunction } from 'openai/helpers/zod'; + +const Table = z.enum(['orders', 'customers', 'products']); +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); +const OrderBy = z.enum(['asc', 'desc']); + +const DynamicValue = z.object({ + column_name: z.string(), +}); + +const Condition = z.object({ + column: z.string(), + operator: Operator, + value: z.union([z.string(), z.number(), DynamicValue]), +}); + +const openai = new OpenAI(); + +async function main() { + const runner = openai.beta.chat.completions + .runTools({ + model: 'gpt-4o-2024-08-06', + messages: [{ role: 'user', content: `What are the last 10 orders?` }], + 
stream: true, + tools: [ + zodFunction({ + name: 'query', + function: (args) => { + return { table_name: args.table_name, data: fakeOrders }; + }, + parameters: z.object({ + location: z.string(), + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, + }), + }), + ], + }) + .on('tool_calls.function.arguments.done', (props) => + console.log(`parsed function arguments: ${props.parsed_arguments}`), + ); + + await runner.done(); + + console.dir(runner.messages, { depth: 10 }); +} + +const fakeOrders = [ + { + orderId: 'ORD-001', + customerName: 'Alice Johnson', + products: [{ name: 'Wireless Headphones', quantity: 1, price: 89.99 }], + totalPrice: 89.99, + orderDate: '2024-08-02', + }, + { + orderId: 'ORD-002', + customerName: 'Bob Smith', + products: [ + { name: 'Smartphone Case', quantity: 2, price: 19.99 }, + { name: 'Screen Protector', quantity: 1, price: 9.99 }, + ], + totalPrice: 49.97, + orderDate: '2024-08-03', + }, + { + orderId: 'ORD-003', + customerName: 'Carol Davis', + products: [ + { name: 'Laptop', quantity: 1, price: 999.99 }, + { name: 'Mouse', quantity: 1, price: 29.99 }, + ], + totalPrice: 1029.98, + orderDate: '2024-08-04', + }, + { + orderId: 'ORD-004', + customerName: 'David Wilson', + products: [{ name: 'Coffee Maker', quantity: 1, price: 79.99 }], + totalPrice: 79.99, + orderDate: '2024-08-05', + }, + { + orderId: 'ORD-005', + customerName: 'Eva Brown', + products: [ + { name: 'Fitness Tracker', quantity: 1, price: 129.99 }, + { name: 'Water Bottle', quantity: 2, price: 14.99 }, + ], + totalPrice: 159.97, + orderDate: '2024-08-06', + }, + { + orderId: 'ORD-006', + customerName: 'Frank Miller', + products: [ + { name: 'Gaming Console', quantity: 1, price: 499.99 }, + { name: 'Controller', quantity: 2, price: 59.99 }, + ], + totalPrice: 619.97, + orderDate: '2024-08-07', + }, + { + orderId: 'ORD-007', + customerName: 'Grace Lee', + products: [{ name: 'Bluetooth Speaker', quantity: 1, price: 69.99 
}], + totalPrice: 69.99, + orderDate: '2024-08-08', + }, + { + orderId: 'ORD-008', + customerName: 'Henry Taylor', + products: [ + { name: 'Smartwatch', quantity: 1, price: 199.99 }, + { name: 'Watch Band', quantity: 2, price: 24.99 }, + ], + totalPrice: 249.97, + orderDate: '2024-08-09', + }, + { + orderId: 'ORD-009', + customerName: 'Isla Garcia', + products: [ + { name: 'Tablet', quantity: 1, price: 349.99 }, + { name: 'Tablet Case', quantity: 1, price: 29.99 }, + { name: 'Stylus', quantity: 1, price: 39.99 }, + ], + totalPrice: 419.97, + orderDate: '2024-08-10', + }, + { + orderId: 'ORD-010', + customerName: 'Jack Robinson', + products: [{ name: 'Wireless Charger', quantity: 2, price: 34.99 }], + totalPrice: 69.98, + orderDate: '2024-08-11', + }, +]; + +main(); diff --git a/examples/parsing-stream.ts b/examples/parsing-stream.ts new file mode 100644 index 000000000..d9eda0a4b --- /dev/null +++ b/examples/parsing-stream.ts @@ -0,0 +1,57 @@ +import { zodResponseFormat } from 'openai/helpers/zod'; +import OpenAI from 'openai/index'; +import { z } from 'zod'; + +const Step = z.object({ + explanation: z.string(), + output: z.string(), +}); + +const MathResponse = z.object({ + steps: z.array(Step), + final_answer: z.string(), +}); + +async function main() { + const client = new OpenAI(); + + const stream = client.beta.chat.completions + .stream({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'user', + content: `What's the weather like in SF?`, + }, + ], + response_format: zodResponseFormat(MathResponse, 'math_response'), + }) + .on('refusal.delta', ({ delta }) => { + process.stdout.write(delta); + }) + .on('refusal.done', () => console.log('\n\nrequest refused 😱')) + .on('content.delta', ({ snapshot, parsed }) => { + console.log('content:', snapshot); + console.log('parsed:', parsed); + console.log(); + }) + .on('content.done', (props) => { + if (props.parsed) { + console.log('\n\nfinished parsing!'); + console.log(`answer: ${props.parsed.final_answer}`); + 
} + }); + + await stream.done(); + + const completion = await stream.finalChatCompletion(); + + console.dir(completion, { depth: 5 }); + + const message = completion.choices[0]?.message; + if (message?.parsed) { + console.log(message.parsed.steps); + } +} + +main(); diff --git a/examples/parsing-tools-stream.ts b/examples/parsing-tools-stream.ts new file mode 100644 index 000000000..c527abd00 --- /dev/null +++ b/examples/parsing-tools-stream.ts @@ -0,0 +1,43 @@ +import { zodFunction } from 'openai/helpers/zod'; +import OpenAI from 'openai/index'; +import { z } from 'zod'; + +const GetWeatherArgs = z.object({ + city: z.string(), + country: z.string(), + units: z.enum(['c', 'f']).default('c'), +}); + +async function main() { + const client = new OpenAI(); + const refusal = process.argv.includes('refusal'); + + const stream = client.beta.chat.completions + .stream({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'user', + content: refusal ? 'How do I make anthrax?' : `What's the weather like in SF?`, + }, + ], + tools: [zodFunction({ name: 'get_weather', parameters: GetWeatherArgs })], + }) + .on('tool_calls.function.arguments.delta', (props) => + console.log('tool_calls.function.arguments.delta', props), + ) + .on('tool_calls.function.arguments.done', (props) => + console.log('tool_calls.function.arguments.done', props), + ) + .on('refusal.delta', ({ delta }) => { + process.stdout.write(delta); + }) + .on('refusal.done', () => console.log('\n\nrequest refused 😱')); + + const completion = await stream.finalChatCompletion(); + + console.log('final completion:'); + console.dir(completion, { depth: 10 }); +} + +main(); diff --git a/examples/parsing-tools.ts b/examples/parsing-tools.ts new file mode 100644 index 000000000..8eaea3807 --- /dev/null +++ b/examples/parsing-tools.ts @@ -0,0 +1,67 @@ +import { zodFunction } from 'openai/helpers/zod'; +import OpenAI from 'openai/index'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 'customers', 
'products']); + +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); + +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); + +const OrderBy = z.enum(['asc', 'desc']); + +const DynamicValue = z.object({ + column_name: z.string(), +}); + +const Condition = z.object({ + column: z.string(), + operator: Operator, + value: z.union([z.string(), z.number(), DynamicValue]), +}); + +const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, +}); + +async function main() { + const client = new OpenAI(); + + const completion = await client.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'system', + content: + 'You are a helpful assistant. The current date is August 6, 2024. You help users query for the data they are looking for by calling the query function.', + }, + { + role: 'user', + content: + 'look up all my orders in november of last year that were fulfilled but not delivered on time', + }, + ], + tools: [zodFunction({ name: 'query', parameters: Query })], + }); + console.dir(completion, { depth: 10 }); + + const toolCall = completion.choices[0]?.message.tool_calls?.[0]; + if (toolCall) { + const args = toolCall.function.parsed_arguments as z.infer; + console.log(args); + console.log(args.table_name); + } +} + +main(); diff --git a/examples/parsing.ts b/examples/parsing.ts new file mode 100644 index 000000000..d92cc2720 --- /dev/null +++ b/examples/parsing.ts @@ -0,0 +1,36 @@ +import { zodResponseFormat } from 'openai/helpers/zod'; +import OpenAI from 'openai/index'; +import { z } from 'zod'; + +const Step = z.object({ + explanation: z.string(), + output: z.string(), +}); + +const MathResponse = z.object({ + steps: z.array(Step), + final_answer: z.string(), +}); + +async function main() { + const client = new OpenAI(); + + const completion = await 
client.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { role: 'system', content: 'You are a helpful math tutor.' }, + { role: 'user', content: 'solve 8x + 31 = 2' }, + ], + response_format: zodResponseFormat(MathResponse, 'math_response'), + }); + + console.dir(completion, { depth: 5 }); + + const message = completion.choices[0]?.message; + if (message?.parsed) { + console.log(message.parsed.steps); + console.log(`answer: ${message.parsed.final_answer}`); + } +} + +main(); diff --git a/examples/stream.ts b/examples/stream.ts index f3b712e8e..86dbde8b8 100644 --- a/examples/stream.ts +++ b/examples/stream.ts @@ -5,7 +5,7 @@ import OpenAI from 'openai'; const openai = new OpenAI(); async function main() { - const runner = await openai.beta.chat.completions + const runner = openai.beta.chat.completions .stream({ model: 'gpt-3.5-turbo', messages: [{ role: 'user', content: 'Say this is a test' }], diff --git a/examples/tool-call-helpers-zod.ts b/examples/tool-call-helpers-zod.ts index e02c743be..700f401a6 100755 --- a/examples/tool-call-helpers-zod.ts +++ b/examples/tool-call-helpers-zod.ts @@ -36,7 +36,7 @@ async function getBook({ id }: GetParams) { } async function main() { - const runner = await openai.beta.chat.completions + const runner = openai.beta.chat.completions .runTools({ model: 'gpt-4-1106-preview', stream: true, diff --git a/helpers.md b/helpers.md index dda1ab26b..abf980c82 100644 --- a/helpers.md +++ b/helpers.md @@ -1,4 +1,133 @@ -# Helpers +# Structured Outputs Parsing Helpers + +The OpenAI API supports extracting JSON from the model with the `response_format` request param, for more details on the API, see [this guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ +The SDK provides a `client.beta.chat.completions.parse()` method which is a wrapper over the `client.chat.completions.create()` that +provides richer integrations with TS specific types & returns a `ParsedChatCompletion` object, which is an extension of the standard `ChatCompletion` type. + +## Auto-parsing response content with Zod schemas + +You can pass zod schemas wrapped with `zodResponseFormat()` to the `.parse()` method and the SDK will automatically conver the model +into a JSON schema, send it to the API and parse the response content back using the given zod schema. + +```ts +import { zodResponseFormat } from 'openai/helpers/zod'; +import OpenAI from 'openai/index'; +import { z } from 'zod'; + +const Step = z.object({ + explanation: z.string(), + output: z.string(), +}); + +const MathResponse = z.object({ + steps: z.array(Step), + final_answer: z.string(), +}); + +const client = new OpenAI(); + +const completion = await client.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { role: 'system', content: 'You are a helpful math tutor.' 
}, + { role: 'user', content: 'solve 8x + 31 = 2' }, + ], + response_format: zodResponseFormat(MathResponse, 'math_response'), +}); + +console.dir(completion, { depth: 5 }); + +const message = completion.choices[0]?.message; +if (message?.parsed) { + console.log(message.parsed.steps); + console.log(`answer: ${message.parsed.final_answer}`); +} +``` + +## Auto-parsing function tool calls + +The `.parse()` method will also automatically parse `function` tool calls if: + +- You use the `zodFunctionTool()` helper method +- You mark your tool schema with `"strict": True` + +For example: + +```ts +import { zodFunction } from 'openai/helpers/zod'; +import OpenAI from 'openai/index'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 'customers', 'products']); + +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); + +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); + +const OrderBy = z.enum(['asc', 'desc']); + +const DynamicValue = z.object({ + column_name: z.string(), +}); + +const Condition = z.object({ + column: z.string(), + operator: Operator, + value: z.union([z.string(), z.number(), DynamicValue]), +}); + +const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, +}); + +const client = new OpenAI(); +const completion = await client.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'system', + content: + 'You are a helpful assistant. The current date is August 6, 2024. 
You help users query for the data they are looking for by calling the query function.', + }, + { + role: 'user', + content: 'look up all my orders in november of last year that were fulfilled but not delivered on time', + }, + ], + tools: [zodFunction({ name: 'query', parameters: Query })], +}); +console.dir(completion, { depth: 10 }); + +const toolCall = completion.choices[0]?.message.tool_calls?.[0]; +if (toolCall) { + const args = toolCall.function.parsed_arguments as z.infer; + console.log(args); + console.log(args.table_name); +} + +main(); +``` + +### Differences from `.create()` + +The `beta.chat.completions.parse()` method imposes some additional restrictions on it's usage that `chat.completions.create()` does not. + +- If the completion completes with `finish_reason` set to `length` or `content_filter`, the `LengthFinishReasonError` / `ContentFilterFinishReasonError` errors will be raised. +- Only strict function tools can be passed, e.g. `{type: 'function', function: {..., strict: true}}` + +# Streaming Helpers OpenAI supports streaming responses when interacting with the [Chat](#chat-streaming) or [Assistant](#assistant-streaming-api) APIs. @@ -265,6 +394,69 @@ The event fired when a function call is made by the assistant. The event fired when the function runner responds to the function call with `role: "function"`. The `content` of the response is given as the first argument to the callback. +#### `.on('content.delta', (props: ContentDeltaEvent) => ...)` + +The event fired for every chunk containing new content. The `props` object contains: +- `delta`: The new content string received in this chunk +- `snapshot`: The accumulated content so far +- `parsed`: The partially parsed content (if applicable) + +#### `.on('content.done', (props: ContentDoneEvent) => ...)` + +The event fired when the content generation is complete. 
The `props` object contains: +- `content`: The full generated content +- `parsed`: The fully parsed content (if applicable) + +#### `.on('refusal.delta', (props: RefusalDeltaEvent) => ...)` + +The event fired when a chunk contains part of a content refusal. The `props` object contains: +- `delta`: The new refusal content string received in this chunk +- `snapshot`: The accumulated refusal content string so far + +#### `.on('refusal.done', (props: RefusalDoneEvent) => ...)` + +The event fired when the refusal content is complete. The `props` object contains: +- `refusal`: The full refusal content + +#### `.on('tool_calls.function.arguments.delta', (props: FunctionToolCallArgumentsDeltaEvent) => ...)` + +The event fired when a chunk contains part of a function tool call's arguments. The `props` object contains: +- `name`: The name of the function being called +- `index`: The index of the tool call +- `arguments`: The accumulated raw JSON string of arguments +- `parsed_arguments`: The partially parsed arguments object +- `arguments_delta`: The new JSON string fragment received in this chunk + +#### `.on('tool_calls.function.arguments.done', (props: FunctionToolCallArgumentsDoneEvent) => ...)` + +The event fired when a function tool call's arguments are complete. The `props` object contains: +- `name`: The name of the function being called +- `index`: The index of the tool call +- `arguments`: The full raw JSON string of arguments +- `parsed_arguments`: The fully parsed arguments object + +#### `.on('logprobs.content.delta', (props: LogProbsContentDeltaEvent) => ...)` + +The event fired when a chunk contains new content log probabilities. The `props` object contains: +- `content`: A list of the new log probabilities received in this chunk +- `snapshot`: A list of the accumulated log probabilities so far + +#### `.on('logprobs.content.done', (props: LogProbsContentDoneEvent) => ...)` + +The event fired when all content log probabilities have been received. 
The `props` object contains: +- `content`: The full list of token log probabilities for the content + +#### `.on('logprobs.refusal.delta', (props: LogProbsRefusalDeltaEvent) => ...)` + +The event fired when a chunk contains new refusal log probabilities. The `props` object contains: +- `refusal`: A list of the new log probabilities received in this chunk +- `snapshot`: A list of the accumulated log probabilities so far + +#### `.on('logprobs.refusal.done', (props: LogProbsRefusalDoneEvent) => ...)` + +The event fired when all refusal log probabilities have been received. The `props` object contains: +- `refusal`: The full list of token log probabilities for the refusal + #### `.on('finalChatCompletion', (completion: ChatCompletion) => …)` The event fired for the final chat completion. If the function call runner exceeds the number diff --git a/jest.config.ts b/jest.config.ts index 56d824cdc..aa2853fd2 100644 --- a/jest.config.ts +++ b/jest.config.ts @@ -18,6 +18,7 @@ const config: JestConfigWithTsJest = { '/deno_tests/', ], testPathIgnorePatterns: ['scripts'], + prettierPath: require.resolve('prettier-2'), }; export default config; diff --git a/package.json b/package.json index 3f6c722d1..2391c1155 100644 --- a/package.json +++ b/package.json @@ -43,12 +43,14 @@ "eslint-plugin-unused-imports": "^3.0.0", "jest": "^29.4.0", "prettier": "^3.0.0", + "prettier-2": "npm:prettier@^2", "ts-jest": "^29.1.0", "ts-morph": "^19.0.0", "ts-node": "^10.5.0", "tsc-multi": "^1.1.0", "tsconfig-paths": "^4.0.0", - "typescript": "^4.8.2" + "typescript": "^4.8.2", + "zod": "^3.23.8" }, "sideEffects": [ "./_shims/index.js", @@ -120,5 +122,13 @@ "default": "./dist/*.mjs" } }, - "bin": "./bin/cli" + "bin": "./bin/cli", + "peerDependencies": { + "zod": "^3.23.8" + }, + "peerDependenciesMeta": { + "zod": { + "optional": true + } + } } diff --git a/scripts/build b/scripts/build index aa7c61f02..b4d686af5 100755 --- a/scripts/build +++ b/scripts/build @@ -50,7 +50,7 @@ node 
scripts/utils/postprocess-files.cjs (cd dist && node -e 'require("openai")') (cd dist && node -e 'import("openai")' --input-type=module) -if command -v deno &> /dev/null && [ -e ./scripts/build-deno ] +if [ "${OPENAI_DISABLE_DENO_BUILD:-0}" != "1" ] && command -v deno &> /dev/null && [ -e ./scripts/build-deno ] then ./scripts/build-deno fi diff --git a/src/_vendor/partial-json-parser/README.md b/src/_vendor/partial-json-parser/README.md new file mode 100644 index 000000000..bc6ea4e3d --- /dev/null +++ b/src/_vendor/partial-json-parser/README.md @@ -0,0 +1,3 @@ +# Partial JSON Parser + +Vendored from https://www.npmjs.com/package/partial-json-parser and updated to use TypeScript. diff --git a/src/_vendor/partial-json-parser/parser.ts b/src/_vendor/partial-json-parser/parser.ts new file mode 100644 index 000000000..9470c462f --- /dev/null +++ b/src/_vendor/partial-json-parser/parser.ts @@ -0,0 +1,264 @@ +type Token = { + type: string; + value: string; +}; + +const tokenize = (input: string): Token[] => { + let current = 0; + let tokens: Token[] = []; + + while (current < input.length) { + let char = input[current]; + + if (char === '\\') { + current++; + continue; + } + + if (char === '{') { + tokens.push({ + type: 'brace', + value: '{', + }); + + current++; + continue; + } + + if (char === '}') { + tokens.push({ + type: 'brace', + value: '}', + }); + + current++; + continue; + } + + if (char === '[') { + tokens.push({ + type: 'paren', + value: '[', + }); + + current++; + continue; + } + + if (char === ']') { + tokens.push({ + type: 'paren', + value: ']', + }); + + current++; + continue; + } + + if (char === ':') { + tokens.push({ + type: 'separator', + value: ':', + }); + + current++; + continue; + } + + if (char === ',') { + tokens.push({ + type: 'delimiter', + value: ',', + }); + + current++; + continue; + } + + if (char === '"') { + let value = ''; + let danglingQuote = false; + + char = input[++current]; + + while (char !== '"') { + if (current === input.length) 
{ + danglingQuote = true; + break; + } + + if (char === '\\') { + current++; + if (current === input.length) { + danglingQuote = true; + break; + } + value += char + input[current]; + char = input[++current]; + } else { + value += char; + char = input[++current]; + } + } + + char = input[++current]; + + if (!danglingQuote) { + tokens.push({ + type: 'string', + value, + }); + } + continue; + } + + let WHITESPACE = /\s/; + if (char && WHITESPACE.test(char)) { + current++; + continue; + } + + let NUMBERS = /[0-9]/; + if ((char && NUMBERS.test(char)) || char === '-' || char === '.') { + let value = ''; + + if (char === '-') { + value += char; + char = input[++current]; + } + + while ((char && NUMBERS.test(char)) || char === '.') { + value += char; + char = input[++current]; + } + + tokens.push({ + type: 'number', + value, + }); + continue; + } + + let LETTERS = /[a-z]/i; + if (char && LETTERS.test(char)) { + let value = ''; + + while (char && LETTERS.test(char)) { + if (current === input.length) { + break; + } + value += char; + char = input[++current]; + } + + if (value == 'true' || value == 'false' || value === 'null') { + tokens.push({ + type: 'name', + value, + }); + } else { + // unknown token, e.g. `nul` which isn't quite `null` + current++; + continue; + } + continue; + } + + current++; + } + + return tokens; + }, + strip = (tokens: Token[]): Token[] => { + if (tokens.length === 0) { + return tokens; + } + + let lastToken = tokens[tokens.length - 1]!; + + switch (lastToken.type) { + case 'separator': + tokens = tokens.slice(0, tokens.length - 1); + return strip(tokens); + break; + case 'number': + let lastCharacterOfLastToken = lastToken.value[lastToken.value.length - 1]; + if (lastCharacterOfLastToken === '.' 
|| lastCharacterOfLastToken === '-') { + tokens = tokens.slice(0, tokens.length - 1); + return strip(tokens); + } + case 'string': + let tokenBeforeTheLastToken = tokens[tokens.length - 2]; + if (tokenBeforeTheLastToken?.type === 'delimiter') { + tokens = tokens.slice(0, tokens.length - 1); + return strip(tokens); + } else if (tokenBeforeTheLastToken?.type === 'brace' && tokenBeforeTheLastToken.value === '{') { + tokens = tokens.slice(0, tokens.length - 1); + return strip(tokens); + } + break; + case 'delimiter': + tokens = tokens.slice(0, tokens.length - 1); + return strip(tokens); + break; + } + + return tokens; + }, + unstrip = (tokens: Token[]): Token[] => { + let tail: string[] = []; + + tokens.map((token) => { + if (token.type === 'brace') { + if (token.value === '{') { + tail.push('}'); + } else { + tail.splice(tail.lastIndexOf('}'), 1); + } + } + if (token.type === 'paren') { + if (token.value === '[') { + tail.push(']'); + } else { + tail.splice(tail.lastIndexOf(']'), 1); + } + } + }); + + if (tail.length > 0) { + tail.reverse().map((item) => { + if (item === '}') { + tokens.push({ + type: 'brace', + value: '}', + }); + } else if (item === ']') { + tokens.push({ + type: 'paren', + value: ']', + }); + } + }); + } + + return tokens; + }, + generate = (tokens: Token[]): string => { + let output = ''; + + tokens.map((token) => { + switch (token.type) { + case 'string': + output += '"' + token.value + '"'; + break; + default: + output += token.value; + break; + } + }); + + return output; + }, + partialParse = (input: string): unknown => JSON.parse(generate(unstrip(strip(tokenize(input))))); + +export { partialParse }; diff --git a/src/_vendor/zod-to-json-schema/Options.ts b/src/_vendor/zod-to-json-schema/Options.ts new file mode 100644 index 000000000..dd04692f1 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/Options.ts @@ -0,0 +1,73 @@ +import { ZodSchema, ZodTypeDef } from 'zod'; +import { Refs, Seen } from './Refs'; +import { JsonSchema7Type } from 
'./parseDef'; + +export type Targets = 'jsonSchema7' | 'jsonSchema2019-09' | 'openApi3'; + +export type DateStrategy = 'format:date-time' | 'format:date' | 'string' | 'integer'; + +export const ignoreOverride = Symbol('Let zodToJsonSchema decide on which parser to use'); + +export type Options = { + name: string | undefined; + $refStrategy: 'root' | 'relative' | 'none' | 'seen'; + basePath: string[]; + effectStrategy: 'input' | 'any'; + pipeStrategy: 'input' | 'output' | 'all'; + dateStrategy: DateStrategy | DateStrategy[]; + mapStrategy: 'entries' | 'record'; + removeAdditionalStrategy: 'passthrough' | 'strict'; + target: Target; + strictUnions: boolean; + definitionPath: string; + definitions: Record; + errorMessages: boolean; + markdownDescription: boolean; + patternStrategy: 'escape' | 'preserve'; + applyRegexFlags: boolean; + emailStrategy: 'format:email' | 'format:idn-email' | 'pattern:zod'; + base64Strategy: 'format:binary' | 'contentEncoding:base64' | 'pattern:zod'; + nameStrategy: 'ref' | 'title'; + override?: ( + def: ZodTypeDef, + refs: Refs, + seen: Seen | undefined, + forceResolution?: boolean, + ) => JsonSchema7Type | undefined | typeof ignoreOverride; + openaiStrictMode?: boolean; +}; + +export const defaultOptions: Options = { + name: undefined, + $refStrategy: 'root', + basePath: ['#'], + effectStrategy: 'input', + pipeStrategy: 'all', + dateStrategy: 'format:date-time', + mapStrategy: 'entries', + removeAdditionalStrategy: 'passthrough', + definitionPath: 'definitions', + target: 'jsonSchema7', + strictUnions: false, + definitions: {}, + errorMessages: false, + markdownDescription: false, + patternStrategy: 'escape', + applyRegexFlags: false, + emailStrategy: 'format:email', + base64Strategy: 'contentEncoding:base64', + nameStrategy: 'ref', +}; + +export const getDefaultOptions = ( + options: Partial> | string | undefined, +) => + (typeof options === 'string' ? 
+ { + ...defaultOptions, + name: options, + } + : { + ...defaultOptions, + ...options, + }) as Options; diff --git a/src/_vendor/zod-to-json-schema/Refs.ts b/src/_vendor/zod-to-json-schema/Refs.ts new file mode 100644 index 000000000..6dad82f07 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/Refs.ts @@ -0,0 +1,39 @@ +import { ZodTypeDef } from 'zod'; +import { getDefaultOptions, Options, Targets } from './Options'; +import { JsonSchema7Type } from './parseDef'; + +export type Refs = { + seen: Map; + currentPath: string[]; + propertyPath: string[] | undefined; +} & Options; + +export type Seen = { + def: ZodTypeDef; + path: string[]; + jsonSchema: JsonSchema7Type | undefined; +}; + +export const getRefs = (options?: string | Partial>): Refs => { + const _options = getDefaultOptions(options); + const currentPath = + _options.name !== undefined ? + [..._options.basePath, _options.definitionPath, _options.name] + : _options.basePath; + return { + ..._options, + currentPath: currentPath, + propertyPath: undefined, + seen: new Map( + Object.entries(_options.definitions).map(([name, def]) => [ + def._def, + { + def: def._def, + path: [..._options.basePath, _options.definitionPath, name], + // Resolution of references will be forced even though seen, so it's ok that the schema is undefined here for now. 
+ jsonSchema: undefined, + }, + ]), + ), + }; +}; diff --git a/src/_vendor/zod-to-json-schema/errorMessages.ts b/src/_vendor/zod-to-json-schema/errorMessages.ts new file mode 100644 index 000000000..ceb0e8b73 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/errorMessages.ts @@ -0,0 +1,31 @@ +import { JsonSchema7TypeUnion } from './parseDef'; +import { Refs } from './Refs'; + +export type ErrorMessages = Partial< + Omit<{ [key in keyof T]: string }, OmitProperties | 'type' | 'errorMessages'> +>; + +export function addErrorMessage }>( + res: T, + key: keyof T, + errorMessage: string | undefined, + refs: Refs, +) { + if (!refs?.errorMessages) return; + if (errorMessage) { + res.errorMessage = { + ...res.errorMessage, + [key]: errorMessage, + }; + } +} + +export function setResponseValueAndErrors< + Json7Type extends JsonSchema7TypeUnion & { + errorMessage?: ErrorMessages; + }, + Key extends keyof Omit, +>(res: Json7Type, key: Key, value: Json7Type[Key], errorMessage: string | undefined, refs: Refs) { + res[key] = value; + addErrorMessage(res, key, errorMessage, refs); +} diff --git a/src/_vendor/zod-to-json-schema/index.ts b/src/_vendor/zod-to-json-schema/index.ts new file mode 100644 index 000000000..5808bc280 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/index.ts @@ -0,0 +1,37 @@ +export * from './Options'; +export * from './Refs'; +export * from './errorMessages'; +export * from './parseDef'; +export * from './parsers/any'; +export * from './parsers/array'; +export * from './parsers/bigint'; +export * from './parsers/boolean'; +export * from './parsers/branded'; +export * from './parsers/catch'; +export * from './parsers/date'; +export * from './parsers/default'; +export * from './parsers/effects'; +export * from './parsers/enum'; +export * from './parsers/intersection'; +export * from './parsers/literal'; +export * from './parsers/map'; +export * from './parsers/nativeEnum'; +export * from './parsers/never'; +export * from './parsers/null'; +export * from 
'./parsers/nullable'; +export * from './parsers/number'; +export * from './parsers/object'; +export * from './parsers/optional'; +export * from './parsers/pipeline'; +export * from './parsers/promise'; +export * from './parsers/readonly'; +export * from './parsers/record'; +export * from './parsers/set'; +export * from './parsers/string'; +export * from './parsers/tuple'; +export * from './parsers/undefined'; +export * from './parsers/union'; +export * from './parsers/unknown'; +export * from './zodToJsonSchema'; +import { zodToJsonSchema } from './zodToJsonSchema'; +export default zodToJsonSchema; diff --git a/src/_vendor/zod-to-json-schema/parseDef.ts b/src/_vendor/zod-to-json-schema/parseDef.ts new file mode 100644 index 000000000..c22fc33eb --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parseDef.ts @@ -0,0 +1,231 @@ +import { ZodFirstPartyTypeKind, ZodTypeDef } from 'zod'; +import { JsonSchema7AnyType, parseAnyDef } from './parsers/any'; +import { JsonSchema7ArrayType, parseArrayDef } from './parsers/array'; +import { JsonSchema7BigintType, parseBigintDef } from './parsers/bigint'; +import { JsonSchema7BooleanType, parseBooleanDef } from './parsers/boolean'; +import { parseBrandedDef } from './parsers/branded'; +import { parseCatchDef } from './parsers/catch'; +import { JsonSchema7DateType, parseDateDef } from './parsers/date'; +import { parseDefaultDef } from './parsers/default'; +import { parseEffectsDef } from './parsers/effects'; +import { JsonSchema7EnumType, parseEnumDef } from './parsers/enum'; +import { JsonSchema7AllOfType, parseIntersectionDef } from './parsers/intersection'; +import { JsonSchema7LiteralType, parseLiteralDef } from './parsers/literal'; +import { JsonSchema7MapType, parseMapDef } from './parsers/map'; +import { JsonSchema7NativeEnumType, parseNativeEnumDef } from './parsers/nativeEnum'; +import { JsonSchema7NeverType, parseNeverDef } from './parsers/never'; +import { JsonSchema7NullType, parseNullDef } from './parsers/null'; 
+import { JsonSchema7NullableType, parseNullableDef } from './parsers/nullable'; +import { JsonSchema7NumberType, parseNumberDef } from './parsers/number'; +import { JsonSchema7ObjectType, parseObjectDef } from './parsers/object'; +import { parseOptionalDef } from './parsers/optional'; +import { parsePipelineDef } from './parsers/pipeline'; +import { parsePromiseDef } from './parsers/promise'; +import { JsonSchema7RecordType, parseRecordDef } from './parsers/record'; +import { JsonSchema7SetType, parseSetDef } from './parsers/set'; +import { JsonSchema7StringType, parseStringDef } from './parsers/string'; +import { JsonSchema7TupleType, parseTupleDef } from './parsers/tuple'; +import { JsonSchema7UndefinedType, parseUndefinedDef } from './parsers/undefined'; +import { JsonSchema7UnionType, parseUnionDef } from './parsers/union'; +import { JsonSchema7UnknownType, parseUnknownDef } from './parsers/unknown'; +import { Refs, Seen } from './Refs'; +import { parseReadonlyDef } from './parsers/readonly'; +import { ignoreOverride } from './Options'; + +type JsonSchema7RefType = { $ref: string }; +type JsonSchema7Meta = { + title?: string; + default?: any; + description?: string; + markdownDescription?: string; +}; + +export type JsonSchema7TypeUnion = + | JsonSchema7StringType + | JsonSchema7ArrayType + | JsonSchema7NumberType + | JsonSchema7BigintType + | JsonSchema7BooleanType + | JsonSchema7DateType + | JsonSchema7EnumType + | JsonSchema7LiteralType + | JsonSchema7NativeEnumType + | JsonSchema7NullType + | JsonSchema7NumberType + | JsonSchema7ObjectType + | JsonSchema7RecordType + | JsonSchema7TupleType + | JsonSchema7UnionType + | JsonSchema7UndefinedType + | JsonSchema7RefType + | JsonSchema7NeverType + | JsonSchema7MapType + | JsonSchema7AnyType + | JsonSchema7NullableType + | JsonSchema7AllOfType + | JsonSchema7UnknownType + | JsonSchema7SetType; + +export type JsonSchema7Type = JsonSchema7TypeUnion & JsonSchema7Meta; + +export function parseDef( + def: ZodTypeDef, 
+ refs: Refs, + forceResolution = false, // Forces a new schema to be instantiated even though its def has been seen. Used for improving refs in definitions. See https://github.com/StefanTerdell/zod-to-json-schema/pull/61. +): JsonSchema7Type | undefined { + const seenItem = refs.seen.get(def); + + if (refs.override) { + const overrideResult = refs.override?.(def, refs, seenItem, forceResolution); + + if (overrideResult !== ignoreOverride) { + return overrideResult; + } + } + + if (seenItem && !forceResolution) { + const seenSchema = get$ref(seenItem, refs); + + if (seenSchema !== undefined) { + return seenSchema; + } + } + + const newItem: Seen = { def, path: refs.currentPath, jsonSchema: undefined }; + + refs.seen.set(def, newItem); + + const jsonSchema = selectParser(def, (def as any).typeName, refs); + + if (jsonSchema) { + addMeta(def, refs, jsonSchema); + } + + newItem.jsonSchema = jsonSchema; + + return jsonSchema; +} + +const get$ref = ( + item: Seen, + refs: Refs, +): + | { + $ref: string; + } + | {} + | undefined => { + switch (refs.$refStrategy) { + case 'root': + return { $ref: item.path.join('/') }; + case 'relative': + return { $ref: getRelativePath(refs.currentPath, item.path) }; + case 'none': + case 'seen': { + if ( + item.path.length < refs.currentPath.length && + item.path.every((value, index) => refs.currentPath[index] === value) + ) { + console.warn(`Recursive reference detected at ${refs.currentPath.join('/')}! Defaulting to any`); + + return {}; + } + + return refs.$refStrategy === 'seen' ? 
{} : undefined; + } + } +}; + +const getRelativePath = (pathA: string[], pathB: string[]) => { + let i = 0; + for (; i < pathA.length && i < pathB.length; i++) { + if (pathA[i] !== pathB[i]) break; + } + return [(pathA.length - i).toString(), ...pathB.slice(i)].join('/'); +}; + +const selectParser = (def: any, typeName: ZodFirstPartyTypeKind, refs: Refs): JsonSchema7Type | undefined => { + switch (typeName) { + case ZodFirstPartyTypeKind.ZodString: + return parseStringDef(def, refs); + case ZodFirstPartyTypeKind.ZodNumber: + return parseNumberDef(def, refs); + case ZodFirstPartyTypeKind.ZodObject: + return parseObjectDef(def, refs); + case ZodFirstPartyTypeKind.ZodBigInt: + return parseBigintDef(def, refs); + case ZodFirstPartyTypeKind.ZodBoolean: + return parseBooleanDef(); + case ZodFirstPartyTypeKind.ZodDate: + return parseDateDef(def, refs); + case ZodFirstPartyTypeKind.ZodUndefined: + return parseUndefinedDef(); + case ZodFirstPartyTypeKind.ZodNull: + return parseNullDef(refs); + case ZodFirstPartyTypeKind.ZodArray: + return parseArrayDef(def, refs); + case ZodFirstPartyTypeKind.ZodUnion: + case ZodFirstPartyTypeKind.ZodDiscriminatedUnion: + return parseUnionDef(def, refs); + case ZodFirstPartyTypeKind.ZodIntersection: + return parseIntersectionDef(def, refs); + case ZodFirstPartyTypeKind.ZodTuple: + return parseTupleDef(def, refs); + case ZodFirstPartyTypeKind.ZodRecord: + return parseRecordDef(def, refs); + case ZodFirstPartyTypeKind.ZodLiteral: + return parseLiteralDef(def, refs); + case ZodFirstPartyTypeKind.ZodEnum: + return parseEnumDef(def); + case ZodFirstPartyTypeKind.ZodNativeEnum: + return parseNativeEnumDef(def); + case ZodFirstPartyTypeKind.ZodNullable: + return parseNullableDef(def, refs); + case ZodFirstPartyTypeKind.ZodOptional: + return parseOptionalDef(def, refs); + case ZodFirstPartyTypeKind.ZodMap: + return parseMapDef(def, refs); + case ZodFirstPartyTypeKind.ZodSet: + return parseSetDef(def, refs); + case ZodFirstPartyTypeKind.ZodLazy: + 
return parseDef(def.getter()._def, refs); + case ZodFirstPartyTypeKind.ZodPromise: + return parsePromiseDef(def, refs); + case ZodFirstPartyTypeKind.ZodNaN: + case ZodFirstPartyTypeKind.ZodNever: + return parseNeverDef(); + case ZodFirstPartyTypeKind.ZodEffects: + return parseEffectsDef(def, refs); + case ZodFirstPartyTypeKind.ZodAny: + return parseAnyDef(); + case ZodFirstPartyTypeKind.ZodUnknown: + return parseUnknownDef(); + case ZodFirstPartyTypeKind.ZodDefault: + return parseDefaultDef(def, refs); + case ZodFirstPartyTypeKind.ZodBranded: + return parseBrandedDef(def, refs); + case ZodFirstPartyTypeKind.ZodReadonly: + return parseReadonlyDef(def, refs); + case ZodFirstPartyTypeKind.ZodCatch: + return parseCatchDef(def, refs); + case ZodFirstPartyTypeKind.ZodPipeline: + return parsePipelineDef(def, refs); + case ZodFirstPartyTypeKind.ZodFunction: + case ZodFirstPartyTypeKind.ZodVoid: + case ZodFirstPartyTypeKind.ZodSymbol: + return undefined; + default: + return ((_: never) => undefined)(typeName); + } +}; + +const addMeta = (def: ZodTypeDef, refs: Refs, jsonSchema: JsonSchema7Type): JsonSchema7Type => { + if (def.description) { + jsonSchema.description = def.description; + + if (refs.markdownDescription) { + jsonSchema.markdownDescription = def.description; + } + } + return jsonSchema; +}; diff --git a/src/_vendor/zod-to-json-schema/parsers/any.ts b/src/_vendor/zod-to-json-schema/parsers/any.ts new file mode 100644 index 000000000..68c2921da --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/any.ts @@ -0,0 +1,5 @@ +export type JsonSchema7AnyType = {}; + +export function parseAnyDef(): JsonSchema7AnyType { + return {}; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/array.ts b/src/_vendor/zod-to-json-schema/parsers/array.ts new file mode 100644 index 000000000..3e8578f8b --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/array.ts @@ -0,0 +1,36 @@ +import { ZodArrayDef, ZodFirstPartyTypeKind } from 'zod'; +import { ErrorMessages, 
setResponseValueAndErrors } from '../errorMessages'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export type JsonSchema7ArrayType = { + type: 'array'; + items?: JsonSchema7Type | undefined; + minItems?: number; + maxItems?: number; + errorMessages?: ErrorMessages; +}; + +export function parseArrayDef(def: ZodArrayDef, refs: Refs) { + const res: JsonSchema7ArrayType = { + type: 'array', + }; + if (def.type?._def?.typeName !== ZodFirstPartyTypeKind.ZodAny) { + res.items = parseDef(def.type._def, { + ...refs, + currentPath: [...refs.currentPath, 'items'], + }); + } + + if (def.minLength) { + setResponseValueAndErrors(res, 'minItems', def.minLength.value, def.minLength.message, refs); + } + if (def.maxLength) { + setResponseValueAndErrors(res, 'maxItems', def.maxLength.value, def.maxLength.message, refs); + } + if (def.exactLength) { + setResponseValueAndErrors(res, 'minItems', def.exactLength.value, def.exactLength.message, refs); + setResponseValueAndErrors(res, 'maxItems', def.exactLength.value, def.exactLength.message, refs); + } + return res; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/bigint.ts b/src/_vendor/zod-to-json-schema/parsers/bigint.ts new file mode 100644 index 000000000..f46784184 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/bigint.ts @@ -0,0 +1,60 @@ +import { ZodBigIntDef } from 'zod'; +import { Refs } from '../Refs'; +import { ErrorMessages, setResponseValueAndErrors } from '../errorMessages'; + +export type JsonSchema7BigintType = { + type: 'integer'; + format: 'int64'; + minimum?: BigInt; + exclusiveMinimum?: BigInt; + maximum?: BigInt; + exclusiveMaximum?: BigInt; + multipleOf?: BigInt; + errorMessage?: ErrorMessages; +}; + +export function parseBigintDef(def: ZodBigIntDef, refs: Refs): JsonSchema7BigintType { + const res: JsonSchema7BigintType = { + type: 'integer', + format: 'int64', + }; + + if (!def.checks) return res; + + for (const check of def.checks) { + switch 
(check.kind) { + case 'min': + if (refs.target === 'jsonSchema7') { + if (check.inclusive) { + setResponseValueAndErrors(res, 'minimum', check.value, check.message, refs); + } else { + setResponseValueAndErrors(res, 'exclusiveMinimum', check.value, check.message, refs); + } + } else { + if (!check.inclusive) { + res.exclusiveMinimum = true as any; + } + setResponseValueAndErrors(res, 'minimum', check.value, check.message, refs); + } + break; + case 'max': + if (refs.target === 'jsonSchema7') { + if (check.inclusive) { + setResponseValueAndErrors(res, 'maximum', check.value, check.message, refs); + } else { + setResponseValueAndErrors(res, 'exclusiveMaximum', check.value, check.message, refs); + } + } else { + if (!check.inclusive) { + res.exclusiveMaximum = true as any; + } + setResponseValueAndErrors(res, 'maximum', check.value, check.message, refs); + } + break; + case 'multipleOf': + setResponseValueAndErrors(res, 'multipleOf', check.value, check.message, refs); + break; + } + } + return res; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/boolean.ts b/src/_vendor/zod-to-json-schema/parsers/boolean.ts new file mode 100644 index 000000000..715e41acc --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/boolean.ts @@ -0,0 +1,9 @@ +export type JsonSchema7BooleanType = { + type: 'boolean'; +}; + +export function parseBooleanDef(): JsonSchema7BooleanType { + return { + type: 'boolean', + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/branded.ts b/src/_vendor/zod-to-json-schema/parsers/branded.ts new file mode 100644 index 000000000..2242580a5 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/branded.ts @@ -0,0 +1,7 @@ +import { ZodBrandedDef } from 'zod'; +import { parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export function parseBrandedDef(_def: ZodBrandedDef, refs: Refs) { + return parseDef(_def.type._def, refs); +} diff --git a/src/_vendor/zod-to-json-schema/parsers/catch.ts 
b/src/_vendor/zod-to-json-schema/parsers/catch.ts new file mode 100644 index 000000000..5cce3afa1 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/catch.ts @@ -0,0 +1,7 @@ +import { ZodCatchDef } from 'zod'; +import { parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export const parseCatchDef = (def: ZodCatchDef, refs: Refs) => { + return parseDef(def.innerType._def, refs); +}; diff --git a/src/_vendor/zod-to-json-schema/parsers/date.ts b/src/_vendor/zod-to-json-schema/parsers/date.ts new file mode 100644 index 000000000..4afc4e8dc --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/date.ts @@ -0,0 +1,83 @@ +import { ZodDateDef } from 'zod'; +import { Refs } from '../Refs'; +import { ErrorMessages, setResponseValueAndErrors } from '../errorMessages'; +import { JsonSchema7NumberType } from './number'; +import { DateStrategy } from '../Options'; + +export type JsonSchema7DateType = + | { + type: 'integer' | 'string'; + format: 'unix-time' | 'date-time' | 'date'; + minimum?: number; + maximum?: number; + errorMessage?: ErrorMessages; + } + | { + anyOf: JsonSchema7DateType[]; + }; + +export function parseDateDef( + def: ZodDateDef, + refs: Refs, + overrideDateStrategy?: DateStrategy, +): JsonSchema7DateType { + const strategy = overrideDateStrategy ?? 
refs.dateStrategy; + + if (Array.isArray(strategy)) { + return { + anyOf: strategy.map((item, i) => parseDateDef(def, refs, item)), + }; + } + + switch (strategy) { + case 'string': + case 'format:date-time': + return { + type: 'string', + format: 'date-time', + }; + case 'format:date': + return { + type: 'string', + format: 'date', + }; + case 'integer': + return integerDateParser(def, refs); + } +} + +const integerDateParser = (def: ZodDateDef, refs: Refs) => { + const res: JsonSchema7DateType = { + type: 'integer', + format: 'unix-time', + }; + + if (refs.target === 'openApi3') { + return res; + } + + for (const check of def.checks) { + switch (check.kind) { + case 'min': + setResponseValueAndErrors( + res, + 'minimum', + check.value, // This is in milliseconds + check.message, + refs, + ); + break; + case 'max': + setResponseValueAndErrors( + res, + 'maximum', + check.value, // This is in milliseconds + check.message, + refs, + ); + break; + } + } + + return res; +}; diff --git a/src/_vendor/zod-to-json-schema/parsers/default.ts b/src/_vendor/zod-to-json-schema/parsers/default.ts new file mode 100644 index 000000000..f71726075 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/default.ts @@ -0,0 +1,10 @@ +import { ZodDefaultDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export function parseDefaultDef(_def: ZodDefaultDef, refs: Refs): JsonSchema7Type & { default: any } { + return { + ...parseDef(_def.innerType._def, refs), + default: _def.defaultValue(), + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/effects.ts b/src/_vendor/zod-to-json-schema/parsers/effects.ts new file mode 100644 index 000000000..23d368987 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/effects.ts @@ -0,0 +1,7 @@ +import { ZodEffectsDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export function parseEffectsDef(_def: ZodEffectsDef, 
refs: Refs): JsonSchema7Type | undefined { + return refs.effectStrategy === 'input' ? parseDef(_def.schema._def, refs) : {}; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/enum.ts b/src/_vendor/zod-to-json-schema/parsers/enum.ts new file mode 100644 index 000000000..d6f5ceb24 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/enum.ts @@ -0,0 +1,13 @@ +import { ZodEnumDef } from 'zod'; + +export type JsonSchema7EnumType = { + type: 'string'; + enum: string[]; +}; + +export function parseEnumDef(def: ZodEnumDef): JsonSchema7EnumType { + return { + type: 'string', + enum: [...def.values], + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/intersection.ts b/src/_vendor/zod-to-json-schema/parsers/intersection.ts new file mode 100644 index 000000000..af5f0421d --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/intersection.ts @@ -0,0 +1,64 @@ +import { ZodIntersectionDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; +import { JsonSchema7StringType } from './string'; + +export type JsonSchema7AllOfType = { + allOf: JsonSchema7Type[]; + unevaluatedProperties?: boolean; +}; + +const isJsonSchema7AllOfType = ( + type: JsonSchema7Type | JsonSchema7StringType, +): type is JsonSchema7AllOfType => { + if ('type' in type && type.type === 'string') return false; + return 'allOf' in type; +}; + +export function parseIntersectionDef( + def: ZodIntersectionDef, + refs: Refs, +): JsonSchema7AllOfType | JsonSchema7Type | undefined { + const allOf = [ + parseDef(def.left._def, { + ...refs, + currentPath: [...refs.currentPath, 'allOf', '0'], + }), + parseDef(def.right._def, { + ...refs, + currentPath: [...refs.currentPath, 'allOf', '1'], + }), + ].filter((x): x is JsonSchema7Type => !!x); + + let unevaluatedProperties: Pick | undefined = + refs.target === 'jsonSchema2019-09' ? 
{ unevaluatedProperties: false } : undefined; + + const mergedAllOf: JsonSchema7Type[] = []; + // If either of the schemas is an allOf, merge them into a single allOf + allOf.forEach((schema) => { + if (isJsonSchema7AllOfType(schema)) { + mergedAllOf.push(...schema.allOf); + if (schema.unevaluatedProperties === undefined) { + // If one of the schemas has no unevaluatedProperties set, + // the merged schema should also have no unevaluatedProperties set + unevaluatedProperties = undefined; + } + } else { + let nestedSchema: JsonSchema7Type = schema; + if ('additionalProperties' in schema && schema.additionalProperties === false) { + const { additionalProperties, ...rest } = schema; + nestedSchema = rest; + } else { + // As soon as one of the schemas has additionalProperties set not to false, we allow unevaluatedProperties + unevaluatedProperties = undefined; + } + mergedAllOf.push(nestedSchema); + } + }); + return mergedAllOf.length ? + { + allOf: mergedAllOf, + ...unevaluatedProperties, + } + : undefined; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/literal.ts b/src/_vendor/zod-to-json-schema/parsers/literal.ts new file mode 100644 index 000000000..a35625cfc --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/literal.ts @@ -0,0 +1,37 @@ +import { ZodLiteralDef } from 'zod'; +import { Refs } from '../Refs'; + +export type JsonSchema7LiteralType = + | { + type: 'string' | 'number' | 'integer' | 'boolean'; + const: string | number | boolean; + } + | { + type: 'object' | 'array'; + }; + +export function parseLiteralDef(def: ZodLiteralDef, refs: Refs): JsonSchema7LiteralType { + const parsedType = typeof def.value; + if ( + parsedType !== 'bigint' && + parsedType !== 'number' && + parsedType !== 'boolean' && + parsedType !== 'string' + ) { + return { + type: Array.isArray(def.value) ? 'array' : 'object', + }; + } + + if (refs.target === 'openApi3') { + return { + type: parsedType === 'bigint' ? 
'integer' : parsedType, + enum: [def.value], + } as any; + } + + return { + type: parsedType === 'bigint' ? 'integer' : parsedType, + const: def.value, + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/map.ts b/src/_vendor/zod-to-json-schema/parsers/map.ts new file mode 100644 index 000000000..5084ccd68 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/map.ts @@ -0,0 +1,42 @@ +import { ZodMapDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; +import { JsonSchema7RecordType, parseRecordDef } from './record'; + +export type JsonSchema7MapType = { + type: 'array'; + maxItems: 125; + items: { + type: 'array'; + items: [JsonSchema7Type, JsonSchema7Type]; + minItems: 2; + maxItems: 2; + }; +}; + +export function parseMapDef(def: ZodMapDef, refs: Refs): JsonSchema7MapType | JsonSchema7RecordType { + if (refs.mapStrategy === 'record') { + return parseRecordDef(def, refs); + } + + const keys = + parseDef(def.keyType._def, { + ...refs, + currentPath: [...refs.currentPath, 'items', 'items', '0'], + }) || {}; + const values = + parseDef(def.valueType._def, { + ...refs, + currentPath: [...refs.currentPath, 'items', 'items', '1'], + }) || {}; + return { + type: 'array', + maxItems: 125, + items: { + type: 'array', + items: [keys, values], + minItems: 2, + maxItems: 2, + }, + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/nativeEnum.ts b/src/_vendor/zod-to-json-schema/parsers/nativeEnum.ts new file mode 100644 index 000000000..a2ed901bb --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/nativeEnum.ts @@ -0,0 +1,27 @@ +import { ZodNativeEnumDef } from 'zod'; + +export type JsonSchema7NativeEnumType = { + type: 'string' | 'number' | ['string', 'number']; + enum: (string | number)[]; +}; + +export function parseNativeEnumDef(def: ZodNativeEnumDef): JsonSchema7NativeEnumType { + const object = def.values; + const actualKeys = Object.keys(def.values).filter((key: string) => { + return 
typeof object[object[key]!] !== 'number'; + }); + + const actualValues = actualKeys.map((key: string) => object[key]!); + + const parsedTypes = Array.from(new Set(actualValues.map((values: string | number) => typeof values))); + + return { + type: + parsedTypes.length === 1 ? + parsedTypes[0] === 'string' ? + 'string' + : 'number' + : ['string', 'number'], + enum: actualValues, + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/never.ts b/src/_vendor/zod-to-json-schema/parsers/never.ts new file mode 100644 index 000000000..a5c7383d7 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/never.ts @@ -0,0 +1,9 @@ +export type JsonSchema7NeverType = { + not: {}; +}; + +export function parseNeverDef(): JsonSchema7NeverType { + return { + not: {}, + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/null.ts b/src/_vendor/zod-to-json-schema/parsers/null.ts new file mode 100644 index 000000000..e1fe11362 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/null.ts @@ -0,0 +1,16 @@ +import { Refs } from '../Refs'; + +export type JsonSchema7NullType = { + type: 'null'; +}; + +export function parseNullDef(refs: Refs): JsonSchema7NullType { + return refs.target === 'openApi3' ? 
+ ({ + enum: ['null'], + nullable: true, + } as any) + : { + type: 'null', + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/nullable.ts b/src/_vendor/zod-to-json-schema/parsers/nullable.ts new file mode 100644 index 000000000..efb70076e --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/nullable.ts @@ -0,0 +1,49 @@ +import { ZodNullableDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; +import { JsonSchema7NullType } from './null'; +import { primitiveMappings } from './union'; + +export type JsonSchema7NullableType = + | { + anyOf: [JsonSchema7Type, JsonSchema7NullType]; + } + | { + type: [string, 'null']; + }; + +export function parseNullableDef(def: ZodNullableDef, refs: Refs): JsonSchema7NullableType | undefined { + if ( + ['ZodString', 'ZodNumber', 'ZodBigInt', 'ZodBoolean', 'ZodNull'].includes(def.innerType._def.typeName) && + (!def.innerType._def.checks || !def.innerType._def.checks.length) + ) { + if (refs.target === 'openApi3') { + return { + type: primitiveMappings[def.innerType._def.typeName as keyof typeof primitiveMappings], + nullable: true, + } as any; + } + + return { + type: [primitiveMappings[def.innerType._def.typeName as keyof typeof primitiveMappings], 'null'], + }; + } + + if (refs.target === 'openApi3') { + const base = parseDef(def.innerType._def, { + ...refs, + currentPath: [...refs.currentPath], + }); + + if (base && '$ref' in base) return { allOf: [base], nullable: true } as any; + + return base && ({ ...base, nullable: true } as any); + } + + const base = parseDef(def.innerType._def, { + ...refs, + currentPath: [...refs.currentPath, 'anyOf', '0'], + }); + + return base && { anyOf: [base, { type: 'null' }] }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/number.ts b/src/_vendor/zod-to-json-schema/parsers/number.ts new file mode 100644 index 000000000..45a1f3c02 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/number.ts @@ -0,0 +1,62 @@ +import { 
ZodNumberDef } from 'zod'; +import { addErrorMessage, ErrorMessages, setResponseValueAndErrors } from '../errorMessages'; +import { Refs } from '../Refs'; + +export type JsonSchema7NumberType = { + type: 'number' | 'integer'; + minimum?: number; + exclusiveMinimum?: number; + maximum?: number; + exclusiveMaximum?: number; + multipleOf?: number; + errorMessage?: ErrorMessages; +}; + +export function parseNumberDef(def: ZodNumberDef, refs: Refs): JsonSchema7NumberType { + const res: JsonSchema7NumberType = { + type: 'number', + }; + + if (!def.checks) return res; + + for (const check of def.checks) { + switch (check.kind) { + case 'int': + res.type = 'integer'; + addErrorMessage(res, 'type', check.message, refs); + break; + case 'min': + if (refs.target === 'jsonSchema7') { + if (check.inclusive) { + setResponseValueAndErrors(res, 'minimum', check.value, check.message, refs); + } else { + setResponseValueAndErrors(res, 'exclusiveMinimum', check.value, check.message, refs); + } + } else { + if (!check.inclusive) { + res.exclusiveMinimum = true as any; + } + setResponseValueAndErrors(res, 'minimum', check.value, check.message, refs); + } + break; + case 'max': + if (refs.target === 'jsonSchema7') { + if (check.inclusive) { + setResponseValueAndErrors(res, 'maximum', check.value, check.message, refs); + } else { + setResponseValueAndErrors(res, 'exclusiveMaximum', check.value, check.message, refs); + } + } else { + if (!check.inclusive) { + res.exclusiveMaximum = true as any; + } + setResponseValueAndErrors(res, 'maximum', check.value, check.message, refs); + } + break; + case 'multipleOf': + setResponseValueAndErrors(res, 'multipleOf', check.value, check.message, refs); + break; + } + } + return res; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/object.ts b/src/_vendor/zod-to-json-schema/parsers/object.ts new file mode 100644 index 000000000..f2120c8fe --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/object.ts @@ -0,0 +1,63 @@ +import { ZodObjectDef 
} from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +function decideAdditionalProperties(def: ZodObjectDef, refs: Refs) { + if (refs.removeAdditionalStrategy === 'strict') { + return def.catchall._def.typeName === 'ZodNever' ? + def.unknownKeys !== 'strict' + : parseDef(def.catchall._def, { + ...refs, + currentPath: [...refs.currentPath, 'additionalProperties'], + }) ?? true; + } else { + return def.catchall._def.typeName === 'ZodNever' ? + def.unknownKeys === 'passthrough' + : parseDef(def.catchall._def, { + ...refs, + currentPath: [...refs.currentPath, 'additionalProperties'], + }) ?? true; + } +} + +export type JsonSchema7ObjectType = { + type: 'object'; + properties: Record; + additionalProperties: boolean | JsonSchema7Type; + required?: string[]; +}; + +export function parseObjectDef(def: ZodObjectDef, refs: Refs) { + const result: JsonSchema7ObjectType = { + type: 'object', + ...Object.entries(def.shape()).reduce( + ( + acc: { + properties: Record; + required: string[]; + }, + [propName, propDef], + ) => { + if (propDef === undefined || propDef._def === undefined) return acc; + const parsedDef = parseDef(propDef._def, { + ...refs, + currentPath: [...refs.currentPath, 'properties', propName], + propertyPath: [...refs.currentPath, 'properties', propName], + }); + if (parsedDef === undefined) return acc; + return { + properties: { + ...acc.properties, + [propName]: parsedDef, + }, + required: + propDef.isOptional() && !refs.openaiStrictMode ? 
acc.required : [...acc.required, propName], + }; + }, + { properties: {}, required: [] }, + ), + additionalProperties: decideAdditionalProperties(def, refs), + }; + if (!result.required!.length) delete result.required; + return result; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/optional.ts b/src/_vendor/zod-to-json-schema/parsers/optional.ts new file mode 100644 index 000000000..9b3e9731f --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/optional.ts @@ -0,0 +1,25 @@ +import { ZodOptionalDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export const parseOptionalDef = (def: ZodOptionalDef, refs: Refs): JsonSchema7Type | undefined => { + if (refs.currentPath.toString() === refs.propertyPath?.toString()) { + return parseDef(def.innerType._def, refs); + } + + const innerSchema = parseDef(def.innerType._def, { + ...refs, + currentPath: [...refs.currentPath, 'anyOf', '1'], + }); + + return innerSchema ? + { + anyOf: [ + { + not: {}, + }, + innerSchema, + ], + } + : {}; +}; diff --git a/src/_vendor/zod-to-json-schema/parsers/pipeline.ts b/src/_vendor/zod-to-json-schema/parsers/pipeline.ts new file mode 100644 index 000000000..7fdcbae02 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/pipeline.ts @@ -0,0 +1,28 @@ +import { ZodPipelineDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; +import { JsonSchema7AllOfType } from './intersection'; + +export const parsePipelineDef = ( + def: ZodPipelineDef, + refs: Refs, +): JsonSchema7AllOfType | JsonSchema7Type | undefined => { + if (refs.pipeStrategy === 'input') { + return parseDef(def.in._def, refs); + } else if (refs.pipeStrategy === 'output') { + return parseDef(def.out._def, refs); + } + + const a = parseDef(def.in._def, { + ...refs, + currentPath: [...refs.currentPath, 'allOf', '0'], + }); + const b = parseDef(def.out._def, { + ...refs, + currentPath: [...refs.currentPath, 
'allOf', a ? '1' : '0'], + }); + + return { + allOf: [a, b].filter((x): x is JsonSchema7Type => x !== undefined), + }; +}; diff --git a/src/_vendor/zod-to-json-schema/parsers/promise.ts b/src/_vendor/zod-to-json-schema/parsers/promise.ts new file mode 100644 index 000000000..f586d1139 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/promise.ts @@ -0,0 +1,7 @@ +import { ZodPromiseDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export function parsePromiseDef(def: ZodPromiseDef, refs: Refs): JsonSchema7Type | undefined { + return parseDef(def.type._def, refs); +} diff --git a/src/_vendor/zod-to-json-schema/parsers/readonly.ts b/src/_vendor/zod-to-json-schema/parsers/readonly.ts new file mode 100644 index 000000000..cecb937d3 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/readonly.ts @@ -0,0 +1,7 @@ +import { ZodReadonlyDef } from 'zod'; +import { parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export const parseReadonlyDef = (def: ZodReadonlyDef, refs: Refs) => { + return parseDef(def.innerType._def, refs); +}; diff --git a/src/_vendor/zod-to-json-schema/parsers/record.ts b/src/_vendor/zod-to-json-schema/parsers/record.ts new file mode 100644 index 000000000..7eff507fb --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/record.ts @@ -0,0 +1,73 @@ +import { ZodFirstPartyTypeKind, ZodMapDef, ZodRecordDef, ZodTypeAny } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; +import { JsonSchema7EnumType } from './enum'; +import { JsonSchema7ObjectType } from './object'; +import { JsonSchema7StringType, parseStringDef } from './string'; + +type JsonSchema7RecordPropertyNamesType = + | Omit + | Omit; + +export type JsonSchema7RecordType = { + type: 'object'; + additionalProperties: JsonSchema7Type; + propertyNames?: JsonSchema7RecordPropertyNamesType; +}; + +export function parseRecordDef( + def: ZodRecordDef | 
ZodMapDef, + refs: Refs, +): JsonSchema7RecordType { + if (refs.target === 'openApi3' && def.keyType?._def.typeName === ZodFirstPartyTypeKind.ZodEnum) { + return { + type: 'object', + required: def.keyType._def.values, + properties: def.keyType._def.values.reduce( + (acc: Record, key: string) => ({ + ...acc, + [key]: + parseDef(def.valueType._def, { + ...refs, + currentPath: [...refs.currentPath, 'properties', key], + }) ?? {}, + }), + {}, + ), + additionalProperties: false, + } satisfies JsonSchema7ObjectType as any; + } + + const schema: JsonSchema7RecordType = { + type: 'object', + additionalProperties: + parseDef(def.valueType._def, { + ...refs, + currentPath: [...refs.currentPath, 'additionalProperties'], + }) ?? {}, + }; + + if (refs.target === 'openApi3') { + return schema; + } + + if (def.keyType?._def.typeName === ZodFirstPartyTypeKind.ZodString && def.keyType._def.checks?.length) { + const keyType: JsonSchema7RecordPropertyNamesType = Object.entries( + parseStringDef(def.keyType._def, refs), + ).reduce((acc, [key, value]) => (key === 'type' ? 
acc : { ...acc, [key]: value }), {}); + + return { + ...schema, + propertyNames: keyType, + }; + } else if (def.keyType?._def.typeName === ZodFirstPartyTypeKind.ZodEnum) { + return { + ...schema, + propertyNames: { + enum: def.keyType._def.values, + }, + }; + } + + return schema; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/set.ts b/src/_vendor/zod-to-json-schema/parsers/set.ts new file mode 100644 index 000000000..05fa9ed79 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/set.ts @@ -0,0 +1,36 @@ +import { ZodSetDef } from 'zod'; +import { ErrorMessages, setResponseValueAndErrors } from '../errorMessages'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export type JsonSchema7SetType = { + type: 'array'; + uniqueItems: true; + items?: JsonSchema7Type | undefined; + minItems?: number; + maxItems?: number; + errorMessage?: ErrorMessages; +}; + +export function parseSetDef(def: ZodSetDef, refs: Refs): JsonSchema7SetType { + const items = parseDef(def.valueType._def, { + ...refs, + currentPath: [...refs.currentPath, 'items'], + }); + + const schema: JsonSchema7SetType = { + type: 'array', + uniqueItems: true, + items, + }; + + if (def.minSize) { + setResponseValueAndErrors(schema, 'minItems', def.minSize.value, def.minSize.message, refs); + } + + if (def.maxSize) { + setResponseValueAndErrors(schema, 'maxItems', def.maxSize.value, def.maxSize.message, refs); + } + + return schema; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/string.ts b/src/_vendor/zod-to-json-schema/parsers/string.ts new file mode 100644 index 000000000..daa1a954a --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/string.ts @@ -0,0 +1,400 @@ +// @ts-nocheck +import { ZodStringDef } from 'zod'; +import { ErrorMessages, setResponseValueAndErrors } from '../errorMessages'; +import { Refs } from '../Refs'; + +let emojiRegex: RegExp | undefined; + +/** + * Generated from the regular expressions found here as of 2024-05-22: 
+ * https://github.com/colinhacks/zod/blob/master/src/types.ts. + * + * Expressions with /i flag have been changed accordingly. + */ +export const zodPatterns = { + /** + * `c` was changed to `[cC]` to replicate /i flag + */ + cuid: /^[cC][^\s-]{8,}$/, + cuid2: /^[0-9a-z]+$/, + ulid: /^[0-9A-HJKMNP-TV-Z]{26}$/, + /** + * `a-z` was added to replicate /i flag + */ + email: /^(?!\.)(?!.*\.\.)([a-zA-Z0-9_'+\-\.]*)[a-zA-Z0-9_+-]@([a-zA-Z0-9][a-zA-Z0-9\-]*\.)+[a-zA-Z]{2,}$/, + /** + * Constructed a valid Unicode RegExp + * + * Lazily instantiate since this type of regex isn't supported + * in all envs (e.g. React Native). + * + * See: + * https://github.com/colinhacks/zod/issues/2433 + * Fix in Zod: + * https://github.com/colinhacks/zod/commit/9340fd51e48576a75adc919bff65dbc4a5d4c99b + */ + emoji: () => { + if (emojiRegex === undefined) { + emojiRegex = RegExp('^(\\p{Extended_Pictographic}|\\p{Emoji_Component})+$', 'u'); + } + return emojiRegex; + }, + /** + * Unused + */ + uuid: /^[0-9a-fA-F]{8}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{12}$/, + /** + * Unused + */ + ipv4: /^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$/, + /** + * Unused + */ + ipv6: /^(([a-f0-9]{1,4}:){7}|::([a-f0-9]{1,4}:){0,6}|([a-f0-9]{1,4}:){1}:([a-f0-9]{1,4}:){0,5}|([a-f0-9]{1,4}:){2}:([a-f0-9]{1,4}:){0,4}|([a-f0-9]{1,4}:){3}:([a-f0-9]{1,4}:){0,3}|([a-f0-9]{1,4}:){4}:([a-f0-9]{1,4}:){0,2}|([a-f0-9]{1,4}:){5}:([a-f0-9]{1,4}:){0,1})([a-f0-9]{1,4}|(((25[0-5])|(2[0-4][0-9])|(1[0-9]{2})|([0-9]{1,2}))\.){3}((25[0-5])|(2[0-4][0-9])|(1[0-9]{2})|([0-9]{1,2})))$/, + base64: /^([0-9a-zA-Z+/]{4})*(([0-9a-zA-Z+/]{2}==)|([0-9a-zA-Z+/]{3}=))?$/, + nanoid: /^[a-zA-Z0-9_-]{21}$/, +} as const; + +export type JsonSchema7StringType = { + type: 'string'; + minLength?: number; + maxLength?: number; + format?: + | 'email' + | 'idn-email' + | 'uri' + | 'uuid' + | 'date-time' + | 'ipv4' + | 'ipv6' + | 'date' + | 'time' + | 
'duration'; + pattern?: string; + allOf?: { + pattern: string; + errorMessage?: ErrorMessages<{ pattern: string }>; + }[]; + anyOf?: { + format: string; + errorMessage?: ErrorMessages<{ format: string }>; + }[]; + errorMessage?: ErrorMessages; + contentEncoding?: string; +}; + +export function parseStringDef(def: ZodStringDef, refs: Refs): JsonSchema7StringType { + const res: JsonSchema7StringType = { + type: 'string', + }; + + function processPattern(value: string): string { + return refs.patternStrategy === 'escape' ? escapeNonAlphaNumeric(value) : value; + } + + if (def.checks) { + for (const check of def.checks) { + switch (check.kind) { + case 'min': + setResponseValueAndErrors( + res, + 'minLength', + typeof res.minLength === 'number' ? Math.max(res.minLength, check.value) : check.value, + check.message, + refs, + ); + break; + case 'max': + setResponseValueAndErrors( + res, + 'maxLength', + typeof res.maxLength === 'number' ? Math.min(res.maxLength, check.value) : check.value, + check.message, + refs, + ); + + break; + case 'email': + switch (refs.emailStrategy) { + case 'format:email': + addFormat(res, 'email', check.message, refs); + break; + case 'format:idn-email': + addFormat(res, 'idn-email', check.message, refs); + break; + case 'pattern:zod': + addPattern(res, zodPatterns.email, check.message, refs); + break; + } + + break; + case 'url': + addFormat(res, 'uri', check.message, refs); + break; + case 'uuid': + addFormat(res, 'uuid', check.message, refs); + break; + case 'regex': + addPattern(res, check.regex, check.message, refs); + break; + case 'cuid': + addPattern(res, zodPatterns.cuid, check.message, refs); + break; + case 'cuid2': + addPattern(res, zodPatterns.cuid2, check.message, refs); + break; + case 'startsWith': + addPattern(res, RegExp(`^${processPattern(check.value)}`), check.message, refs); + break; + case 'endsWith': + addPattern(res, RegExp(`${processPattern(check.value)}$`), check.message, refs); + break; + + case 'datetime': + 
addFormat(res, 'date-time', check.message, refs); + break; + case 'date': + addFormat(res, 'date', check.message, refs); + break; + case 'time': + addFormat(res, 'time', check.message, refs); + break; + case 'duration': + addFormat(res, 'duration', check.message, refs); + break; + case 'length': + setResponseValueAndErrors( + res, + 'minLength', + typeof res.minLength === 'number' ? Math.max(res.minLength, check.value) : check.value, + check.message, + refs, + ); + setResponseValueAndErrors( + res, + 'maxLength', + typeof res.maxLength === 'number' ? Math.min(res.maxLength, check.value) : check.value, + check.message, + refs, + ); + break; + case 'includes': { + addPattern(res, RegExp(processPattern(check.value)), check.message, refs); + break; + } + case 'ip': { + if (check.version !== 'v6') { + addFormat(res, 'ipv4', check.message, refs); + } + if (check.version !== 'v4') { + addFormat(res, 'ipv6', check.message, refs); + } + break; + } + case 'emoji': + addPattern(res, zodPatterns.emoji, check.message, refs); + break; + case 'ulid': { + addPattern(res, zodPatterns.ulid, check.message, refs); + break; + } + case 'base64': { + switch (refs.base64Strategy) { + case 'format:binary': { + addFormat(res, 'binary' as any, check.message, refs); + break; + } + + case 'contentEncoding:base64': { + setResponseValueAndErrors(res, 'contentEncoding', 'base64', check.message, refs); + break; + } + + case 'pattern:zod': { + addPattern(res, zodPatterns.base64, check.message, refs); + break; + } + } + break; + } + case 'nanoid': { + addPattern(res, zodPatterns.nanoid, check.message, refs); + } + case 'toLowerCase': + case 'toUpperCase': + case 'trim': + break; + default: + ((_: never) => {})(check); + } + } + } + + return res; +} + +const escapeNonAlphaNumeric = (value: string) => + Array.from(value) + .map((c) => (/[a-zA-Z0-9]/.test(c) ? 
c : `\\${c}`)) + .join(''); + +const addFormat = ( + schema: JsonSchema7StringType, + value: Required['format'], + message: string | undefined, + refs: Refs, +) => { + if (schema.format || schema.anyOf?.some((x) => x.format)) { + if (!schema.anyOf) { + schema.anyOf = []; + } + + if (schema.format) { + schema.anyOf!.push({ + format: schema.format, + ...(schema.errorMessage && + refs.errorMessages && { + errorMessage: { format: schema.errorMessage.format }, + }), + }); + delete schema.format; + if (schema.errorMessage) { + delete schema.errorMessage.format; + if (Object.keys(schema.errorMessage).length === 0) { + delete schema.errorMessage; + } + } + } + + schema.anyOf!.push({ + format: value, + ...(message && refs.errorMessages && { errorMessage: { format: message } }), + }); + } else { + setResponseValueAndErrors(schema, 'format', value, message, refs); + } +}; + +const addPattern = ( + schema: JsonSchema7StringType, + regex: RegExp | (() => RegExp), + message: string | undefined, + refs: Refs, +) => { + if (schema.pattern || schema.allOf?.some((x) => x.pattern)) { + if (!schema.allOf) { + schema.allOf = []; + } + + if (schema.pattern) { + schema.allOf!.push({ + pattern: schema.pattern, + ...(schema.errorMessage && + refs.errorMessages && { + errorMessage: { pattern: schema.errorMessage.pattern }, + }), + }); + delete schema.pattern; + if (schema.errorMessage) { + delete schema.errorMessage.pattern; + if (Object.keys(schema.errorMessage).length === 0) { + delete schema.errorMessage; + } + } + } + + schema.allOf!.push({ + pattern: processRegExp(regex, refs), + ...(message && refs.errorMessages && { errorMessage: { pattern: message } }), + }); + } else { + setResponseValueAndErrors(schema, 'pattern', processRegExp(regex, refs), message, refs); + } +}; + +// Mutate z.string.regex() in a best attempt to accommodate for regex flags when applyRegexFlags is true +const processRegExp = (regexOrFunction: RegExp | (() => RegExp), refs: Refs): string => { + const regex = 
typeof regexOrFunction === 'function' ? regexOrFunction() : regexOrFunction; + if (!refs.applyRegexFlags || !regex.flags) return regex.source; + + // Currently handled flags + const flags = { + i: regex.flags.includes('i'), // Case-insensitive + m: regex.flags.includes('m'), // `^` and `$` matches adjacent to newline characters + s: regex.flags.includes('s'), // `.` matches newlines + }; + + // The general principle here is to step through each character, one at a time, applying mutations as flags require. We keep track when the current character is escaped, and when it's inside a group /like [this]/ or (also) a range like /[a-z]/. The following is fairly brittle imperative code; edit at your peril! + + const source = flags.i ? regex.source.toLowerCase() : regex.source; + let pattern = ''; + let isEscaped = false; + let inCharGroup = false; + let inCharRange = false; + + for (let i = 0; i < source.length; i++) { + if (isEscaped) { + pattern += source[i]; + isEscaped = false; + continue; + } + + if (flags.i) { + if (inCharGroup) { + if (source[i].match(/[a-z]/)) { + if (inCharRange) { + pattern += source[i]; + pattern += `${source[i - 2]}-${source[i]}`.toUpperCase(); + inCharRange = false; + } else if (source[i + 1] === '-' && source[i + 2]?.match(/[a-z]/)) { + pattern += source[i]; + inCharRange = true; + } else { + pattern += `${source[i]}${source[i].toUpperCase()}`; + } + continue; + } + } else if (source[i].match(/[a-z]/)) { + pattern += `[${source[i]}${source[i].toUpperCase()}]`; + continue; + } + } + + if (flags.m) { + if (source[i] === '^') { + pattern += `(^|(?<=[\r\n]))`; + continue; + } else if (source[i] === '$') { + pattern += `($|(?=[\r\n]))`; + continue; + } + } + + if (flags.s && source[i] === '.') { + pattern += inCharGroup ? 
`${source[i]}\r\n` : `[${source[i]}\r\n]`; + continue; + } + + pattern += source[i]; + if (source[i] === '\\') { + isEscaped = true; + } else if (inCharGroup && source[i] === ']') { + inCharGroup = false; + } else if (!inCharGroup && source[i] === '[') { + inCharGroup = true; + } + } + + try { + const regexTest = new RegExp(pattern); + } catch { + console.warn( + `Could not convert regex pattern at ${refs.currentPath.join( + '/', + )} to a flag-independent form! Falling back to the flag-ignorant source`, + ); + return regex.source; + } + + return pattern; +}; diff --git a/src/_vendor/zod-to-json-schema/parsers/tuple.ts b/src/_vendor/zod-to-json-schema/parsers/tuple.ts new file mode 100644 index 000000000..b2a824006 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/tuple.ts @@ -0,0 +1,54 @@ +import { ZodTupleDef, ZodTupleItems, ZodTypeAny } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export type JsonSchema7TupleType = { + type: 'array'; + minItems: number; + items: JsonSchema7Type[]; +} & ( + | { + maxItems: number; + } + | { + additionalItems?: JsonSchema7Type | undefined; + } +); + +export function parseTupleDef( + def: ZodTupleDef, + refs: Refs, +): JsonSchema7TupleType { + if (def.rest) { + return { + type: 'array', + minItems: def.items.length, + items: def.items + .map((x, i) => + parseDef(x._def, { + ...refs, + currentPath: [...refs.currentPath, 'items', `${i}`], + }), + ) + .reduce((acc: JsonSchema7Type[], x) => (x === undefined ? acc : [...acc, x]), []), + additionalItems: parseDef(def.rest._def, { + ...refs, + currentPath: [...refs.currentPath, 'additionalItems'], + }), + }; + } else { + return { + type: 'array', + minItems: def.items.length, + maxItems: def.items.length, + items: def.items + .map((x, i) => + parseDef(x._def, { + ...refs, + currentPath: [...refs.currentPath, 'items', `${i}`], + }), + ) + .reduce((acc: JsonSchema7Type[], x) => (x === undefined ? 
acc : [...acc, x]), []), + }; + } +} diff --git a/src/_vendor/zod-to-json-schema/parsers/undefined.ts b/src/_vendor/zod-to-json-schema/parsers/undefined.ts new file mode 100644 index 000000000..6864d8138 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/undefined.ts @@ -0,0 +1,9 @@ +export type JsonSchema7UndefinedType = { + not: {}; +}; + +export function parseUndefinedDef(): JsonSchema7UndefinedType { + return { + not: {}, + }; +} diff --git a/src/_vendor/zod-to-json-schema/parsers/union.ts b/src/_vendor/zod-to-json-schema/parsers/union.ts new file mode 100644 index 000000000..1daf14908 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/union.ts @@ -0,0 +1,119 @@ +import { ZodDiscriminatedUnionDef, ZodLiteralDef, ZodTypeAny, ZodUnionDef } from 'zod'; +import { JsonSchema7Type, parseDef } from '../parseDef'; +import { Refs } from '../Refs'; + +export const primitiveMappings = { + ZodString: 'string', + ZodNumber: 'number', + ZodBigInt: 'integer', + ZodBoolean: 'boolean', + ZodNull: 'null', +} as const; +type ZodPrimitive = keyof typeof primitiveMappings; +type JsonSchema7Primitive = (typeof primitiveMappings)[keyof typeof primitiveMappings]; + +export type JsonSchema7UnionType = JsonSchema7PrimitiveUnionType | JsonSchema7AnyOfType; + +type JsonSchema7PrimitiveUnionType = + | { + type: JsonSchema7Primitive | JsonSchema7Primitive[]; + } + | { + type: JsonSchema7Primitive | JsonSchema7Primitive[]; + enum: (string | number | bigint | boolean | null)[]; + }; + +type JsonSchema7AnyOfType = { + anyOf: JsonSchema7Type[]; +}; + +export function parseUnionDef( + def: ZodUnionDef | ZodDiscriminatedUnionDef, + refs: Refs, +): JsonSchema7PrimitiveUnionType | JsonSchema7AnyOfType | undefined { + if (refs.target === 'openApi3') return asAnyOf(def, refs); + + const options: readonly ZodTypeAny[] = + def.options instanceof Map ? 
Array.from(def.options.values()) : def.options; + + // This blocks tries to look ahead a bit to produce nicer looking schemas with type array instead of anyOf. + if ( + options.every((x) => x._def.typeName in primitiveMappings && (!x._def.checks || !x._def.checks.length)) + ) { + // all types in union are primitive and lack checks, so might as well squash into {type: [...]} + + const types = options.reduce((types: JsonSchema7Primitive[], x) => { + const type = primitiveMappings[x._def.typeName as ZodPrimitive]; //Can be safely casted due to row 43 + return type && !types.includes(type) ? [...types, type] : types; + }, []); + + return { + type: types.length > 1 ? types : types[0]!, + }; + } else if (options.every((x) => x._def.typeName === 'ZodLiteral' && !x.description)) { + // all options literals + + const types = options.reduce((acc: JsonSchema7Primitive[], x: { _def: ZodLiteralDef }) => { + const type = typeof x._def.value; + switch (type) { + case 'string': + case 'number': + case 'boolean': + return [...acc, type]; + case 'bigint': + return [...acc, 'integer' as const]; + case 'object': + if (x._def.value === null) return [...acc, 'null' as const]; + case 'symbol': + case 'undefined': + case 'function': + default: + return acc; + } + }, []); + + if (types.length === options.length) { + // all the literals are primitive, as far as null can be considered primitive + + const uniqueTypes = types.filter((x, i, a) => a.indexOf(x) === i); + return { + type: uniqueTypes.length > 1 ? uniqueTypes : uniqueTypes[0]!, + enum: options.reduce( + (acc, x) => { + return acc.includes(x._def.value) ? 
acc : [...acc, x._def.value]; + }, + [] as (string | number | bigint | boolean | null)[], + ), + }; + } + } else if (options.every((x) => x._def.typeName === 'ZodEnum')) { + return { + type: 'string', + enum: options.reduce( + (acc: string[], x) => [...acc, ...x._def.values.filter((x: string) => !acc.includes(x))], + [], + ), + }; + } + + return asAnyOf(def, refs); +} + +const asAnyOf = ( + def: ZodUnionDef | ZodDiscriminatedUnionDef, + refs: Refs, +): JsonSchema7PrimitiveUnionType | JsonSchema7AnyOfType | undefined => { + const anyOf = ((def.options instanceof Map ? Array.from(def.options.values()) : def.options) as any[]) + .map((x, i) => + parseDef(x._def, { + ...refs, + currentPath: [...refs.currentPath, 'anyOf', `${i}`], + }), + ) + .filter( + (x): x is JsonSchema7Type => + !!x && (!refs.strictUnions || (typeof x === 'object' && Object.keys(x).length > 0)), + ); + + return anyOf.length ? { anyOf } : undefined; +}; diff --git a/src/_vendor/zod-to-json-schema/parsers/unknown.ts b/src/_vendor/zod-to-json-schema/parsers/unknown.ts new file mode 100644 index 000000000..a3c8d1d96 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/parsers/unknown.ts @@ -0,0 +1,5 @@ +export type JsonSchema7UnknownType = {}; + +export function parseUnknownDef(): JsonSchema7UnknownType { + return {}; +} diff --git a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts new file mode 100644 index 000000000..a744634be --- /dev/null +++ b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts @@ -0,0 +1,91 @@ +import { ZodSchema } from 'zod'; +import { Options, Targets } from './Options'; +import { JsonSchema7Type, parseDef } from './parseDef'; +import { getRefs } from './Refs'; + +const zodToJsonSchema = ( + schema: ZodSchema, + options?: Partial> | string, +): (Target extends 'jsonSchema7' ? JsonSchema7Type : object) & { + $schema?: string; + definitions?: { + [key: string]: Target extends 'jsonSchema7' ? 
JsonSchema7Type + : Target extends 'jsonSchema2019-09' ? JsonSchema7Type + : object; + }; +} => { + const refs = getRefs(options); + + const definitions = + typeof options === 'object' && options.definitions ? + Object.entries(options.definitions).reduce( + (acc, [name, schema]) => ({ + ...acc, + [name]: + parseDef( + schema._def, + { + ...refs, + currentPath: [...refs.basePath, refs.definitionPath, name], + }, + true, + ) ?? {}, + }), + {}, + ) + : undefined; + + const name = + typeof options === 'string' ? options + : options?.nameStrategy === 'title' ? undefined + : options?.name; + + const main = + parseDef( + schema._def, + name === undefined ? refs : ( + { + ...refs, + currentPath: [...refs.basePath, refs.definitionPath, name], + } + ), + false, + ) ?? {}; + + const title = + typeof options === 'object' && options.name !== undefined && options.nameStrategy === 'title' ? + options.name + : undefined; + + if (title !== undefined) { + main.title = title; + } + + const combined: ReturnType> = + name === undefined ? + definitions ? + { + ...main, + [refs.definitionPath]: definitions, + } + : main + : { + $ref: [...(refs.$refStrategy === 'relative' ? 
[] : refs.basePath), refs.definitionPath, name].join( + '/', + ), + [refs.definitionPath]: { + ...definitions, + [name]: main, + }, + }; + + if (refs.target === 'jsonSchema7') { + combined.$schema = 'http://json-schema.org/draft-07/schema#'; + } else if (refs.target === 'jsonSchema2019-09') { + combined.$schema = 'https://json-schema.org/draft/2019-09/schema#'; + } + + return combined; +}; + +export { zodToJsonSchema }; diff --git a/src/error.ts b/src/error.ts index 19a60598a..83ddbfafa 100644 --- a/src/error.ts +++ b/src/error.ts @@ -156,3 +156,15 @@ export class RateLimitError extends APIError { } export class InternalServerError extends APIError {} + +export class LengthFinishReasonError extends OpenAIError { + constructor() { + super(`Could not parse response content as the length limit was reached`); + } +} + +export class ContentFilterFinishReasonError extends OpenAIError { + constructor() { + super(`Could not parse response content as the request was rejected by the content filter`); + } +} diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts new file mode 100644 index 000000000..ed83d3510 --- /dev/null +++ b/src/helpers/zod.ts @@ -0,0 +1,102 @@ +import { ResponseFormatJSONSchema } from 'openai/resources'; +import type z from 'zod'; +import { + AutoParseableResponseFormat, + AutoParseableTool, + makeParseableResponseFormat, + makeParseableTool, +} from '../lib/parser'; +import { zodToJsonSchema as _zodToJsonSchema } from '../_vendor/zod-to-json-schema'; + +function zodToJsonSchema(schema: z.ZodType): Record { + return _zodToJsonSchema(schema, { openaiStrictMode: true }); +} + +/** + * Creates a chat completion `JSONSchema` response format object from + * the given Zod schema. + * + * If this is passed to the `.parse()`, `.stream()` or `.runTools()` + * chat completion methods then the response message will contain a + * `.parsed` property that is the result of parsing the content with + * the given Zod object. 
+ * + * ```ts + * const completion = await client.beta.chat.completions.parse({ + * model: 'gpt-4o-2024-08-06', + * messages: [ + * { role: 'system', content: 'You are a helpful math tutor.' }, + * { role: 'user', content: 'solve 8x + 31 = 2' }, + * ], + * response_format: zodResponseFormat( + * z.object({ + * steps: z.array(z.object({ + * explanation: z.string(), + * answer: z.string(), + * })), + * final_answer: z.string(), + * }), + * 'math_answer', + * ), + * }); + * const message = completion.choices[0]?.message; + * if (message?.parsed) { + * console.log(message.parsed); + * console.log(message.parsed.final_answer); + * } + * ``` + * + * This can be passed directly to the `.create()` method but will not + * result in any automatic parsing, you'll have to parse the response yourself. + */ +export function zodResponseFormat( + zodObject: ZodInput, + name: string, + props?: Omit, +): AutoParseableResponseFormat> { + return makeParseableResponseFormat( + { + type: 'json_schema', + json_schema: { + ...props, + name, + strict: true, + schema: zodToJsonSchema(zodObject), + }, + }, + (content) => zodObject.parse(JSON.parse(content)), + ); +} + +/** + * Creates a chat completion `function` tool that can be invoked + * automatically by the chat completion `.runTools()` method or automatically + * parsed by `.parse()` / `.stream()`. + */ +export function zodFunction(options: { + name: string; + parameters: Parameters; + function?: ((args: z.infer) => unknown | Promise) | undefined; + description?: string | undefined; +}): AutoParseableTool<{ + arguments: Parameters; + name: string; + function: (args: z.infer) => unknown; +}> { + // @ts-expect-error TODO + return makeParseableTool( + { + type: 'function', + function: { + name: options.name, + parameters: zodToJsonSchema(options.parameters), + strict: true, + ...(options.description ? 
{ description: options.description } : undefined), + }, + }, + { + callback: options.function, + parser: (args) => options.parameters.parse(JSON.parse(args)), + }, + ); +} diff --git a/src/index.ts b/src/index.ts index cd0dd67b3..5f7dffd67 100644 --- a/src/index.ts +++ b/src/index.ts @@ -248,6 +248,7 @@ export namespace OpenAI { export import ChatCompletionChunk = API.ChatCompletionChunk; export import ChatCompletionContentPart = API.ChatCompletionContentPart; export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; + export import ChatCompletionContentPartRefusal = API.ChatCompletionContentPartRefusal; export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam; @@ -322,6 +323,9 @@ export namespace OpenAI { export import ErrorObject = API.ErrorObject; export import FunctionDefinition = API.FunctionDefinition; export import FunctionParameters = API.FunctionParameters; + export import ResponseFormatJSONObject = API.ResponseFormatJSONObject; + export import ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; + export import ResponseFormatText = API.ResponseFormatText; } // ---------------------- Azure ---------------------- diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index 590013aa6..39ee4e993 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -1,7 +1,6 @@ import * as Core from 'openai/core'; import { type CompletionUsage } from 'openai/resources/completions'; import { - type Completions, type ChatCompletion, type ChatCompletionMessage, type ChatCompletionMessageParam, @@ -13,6 +12,7 @@ import { type RunnableFunction, isRunnableFunctionWithParse, type BaseFunctionsArgs, + RunnableToolFunction, } from './RunnableFunction'; import { 
ChatCompletionFunctionRunnerParams, ChatCompletionToolRunnerParams } from './ChatCompletionRunner'; import { @@ -21,6 +21,9 @@ import { } from './ChatCompletionStreamingRunner'; import { isAssistantMessage, isFunctionMessage, isToolMessage } from './chatCompletionUtils'; import { BaseEvents, EventStream } from './EventStream'; +import { ParsedChatCompletion } from '../resources/beta/chat/completions'; +import OpenAI from '../index'; +import { isAutoParsableTool, parseChatCompletion } from 'openai/lib/parser'; const DEFAULT_MAX_CHAT_COMPLETIONS = 10; export interface RunnerOptions extends Core.RequestOptions { @@ -30,14 +33,15 @@ export interface RunnerOptions extends Core.RequestOptions { export class AbstractChatCompletionRunner< EventTypes extends AbstractChatCompletionRunnerEvents, + ParsedT, > extends EventStream { - protected _chatCompletions: ChatCompletion[] = []; + protected _chatCompletions: ParsedChatCompletion[] = []; messages: ChatCompletionMessageParam[] = []; protected _addChatCompletion( - this: AbstractChatCompletionRunner, - chatCompletion: ChatCompletion, - ): ChatCompletion { + this: AbstractChatCompletionRunner, + chatCompletion: ParsedChatCompletion, + ): ParsedChatCompletion { this._chatCompletions.push(chatCompletion); this._emit('chatCompletion', chatCompletion); const message = chatCompletion.choices[0]?.message; @@ -46,7 +50,7 @@ export class AbstractChatCompletionRunner< } protected _addMessage( - this: AbstractChatCompletionRunner, + this: AbstractChatCompletionRunner, message: ChatCompletionMessageParam, emit = true, ) { @@ -75,7 +79,7 @@ export class AbstractChatCompletionRunner< * @returns a promise that resolves with the final ChatCompletion, or rejects * if an error occurred or the stream ended prematurely without producing a ChatCompletion. 
*/ - async finalChatCompletion(): Promise { + async finalChatCompletion(): Promise> { await this.done(); const completion = this._chatCompletions[this._chatCompletions.length - 1]; if (!completion) throw new OpenAIError('stream ended without producing a ChatCompletion'); @@ -101,7 +105,11 @@ export class AbstractChatCompletionRunner< const message = this.messages[i]; if (isAssistantMessage(message)) { const { function_call, ...rest } = message; - const ret: ChatCompletionMessage = { ...rest, content: message.content ?? null }; + const ret: ChatCompletionMessage = { + ...rest, + content: (message as ChatCompletionMessage).content ?? null, + refusal: (message as ChatCompletionMessage).refusal ?? null, + }; if (function_call) { ret.function_call = function_call; } @@ -152,6 +160,7 @@ export class AbstractChatCompletionRunner< if ( isToolMessage(message) && message.content != null && + typeof message.content === 'string' && this.messages.some( (x) => x.role === 'assistant' && @@ -195,7 +204,9 @@ export class AbstractChatCompletionRunner< return [...this._chatCompletions]; } - protected override _emitFinal(this: AbstractChatCompletionRunner) { + protected override _emitFinal( + this: AbstractChatCompletionRunner, + ) { const completion = this._chatCompletions[this._chatCompletions.length - 1]; if (completion) this._emit('finalChatCompletion', completion); const finalMessage = this.#getFinalMessage(); @@ -223,10 +234,10 @@ export class AbstractChatCompletionRunner< } protected async _createChatCompletion( - completions: Completions, + client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions, - ): Promise { + ): Promise> { const signal = options?.signal; if (signal) { if (signal.aborted) this.controller.abort(); @@ -234,27 +245,27 @@ export class AbstractChatCompletionRunner< } this.#validateParams(params); - const chatCompletion = await completions.create( + const chatCompletion = await client.chat.completions.create( { ...params, stream: false 
}, { ...options, signal: this.controller.signal }, ); this._connected(); - return this._addChatCompletion(chatCompletion); + return this._addChatCompletion(parseChatCompletion(chatCompletion, params)); } protected async _runChatCompletion( - completions: Completions, + client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions, ): Promise { for (const message of params.messages) { this._addMessage(message, false); } - return await this._createChatCompletion(completions, params, options); + return await this._createChatCompletion(client, params, options); } protected async _runFunctions( - completions: Completions, + client: OpenAI, params: | ChatCompletionFunctionRunnerParams | ChatCompletionStreamingFunctionRunnerParams, @@ -284,7 +295,7 @@ export class AbstractChatCompletionRunner< for (let i = 0; i < maxChatCompletions; ++i) { const chatCompletion: ChatCompletion = await this._createChatCompletion( - completions, + client, { ...restParams, function_call, @@ -339,7 +350,7 @@ export class AbstractChatCompletionRunner< } protected async _runTools( - completions: Completions, + client: OpenAI, params: | ChatCompletionToolRunnerParams | ChatCompletionStreamingToolRunnerParams, @@ -350,8 +361,31 @@ export class AbstractChatCompletionRunner< const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name; const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {}; + // TODO(someday): clean this logic up + const inputTools = params.tools.map((tool): RunnableToolFunction => { + if (isAutoParsableTool(tool)) { + if (!tool.$callback) { + throw new OpenAIError('Tool given to `.runTools()` that does not have an associated function'); + } + + return { + type: 'function', + function: { + function: tool.$callback, + name: tool.function.name, + description: tool.function.description || '', + parameters: tool.function.parameters as any, + parse: tool.$parseRaw, + strict: true, + }, + }; + } + + return tool as any 
as RunnableToolFunction; + }); + const functionsByName: Record> = {}; - for (const f of params.tools) { + for (const f of inputTools) { if (f.type === 'function') { functionsByName[f.function.name || f.function.function.name] = f.function; } @@ -359,7 +393,7 @@ export class AbstractChatCompletionRunner< const tools: ChatCompletionTool[] = 'tools' in params ? - params.tools.map((t) => + inputTools.map((t) => t.type === 'function' ? { type: 'function', @@ -367,6 +401,7 @@ export class AbstractChatCompletionRunner< name: t.function.name || t.function.function.name, parameters: t.function.parameters as Record, description: t.function.description, + strict: t.function.strict, }, } : (t as unknown as ChatCompletionTool), @@ -379,7 +414,7 @@ export class AbstractChatCompletionRunner< for (let i = 0; i < maxChatCompletions; ++i) { const chatCompletion: ChatCompletion = await this._createChatCompletion( - completions, + client, { ...restParams, tool_choice, @@ -392,7 +427,7 @@ export class AbstractChatCompletionRunner< if (!message) { throw new OpenAIError(`missing message in ChatCompletion response`); } - if (!message.tool_calls) { + if (!message.tool_calls?.length) { return; } @@ -403,8 +438,10 @@ export class AbstractChatCompletionRunner< const fn = functionsByName[name]; if (!fn) { - const content = `Invalid tool_call: ${JSON.stringify(name)}. Available options are: ${tools - .map((f) => JSON.stringify(f.function.name)) + const content = `Invalid tool_call: ${JSON.stringify(name)}. Available options are: ${Object.keys( + functionsByName, + ) + .map((name) => JSON.stringify(name)) .join(', ')}. 
Please try again`; this._addMessage({ role, tool_call_id, content }); diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index 0f88530b3..32cde3e7a 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -191,12 +191,12 @@ export class AssistantStream threadId: string, runId: string, runs: Runs, - body: RunSubmitToolOutputsParamsStream, + params: RunSubmitToolOutputsParamsStream, options: RequestOptions | undefined, ) { const runner = new AssistantStream(); runner._run(() => - runner._runToolAssistantStream(threadId, runId, runs, body, { + runner._runToolAssistantStream(threadId, runId, runs, params, { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, }), @@ -236,13 +236,13 @@ export class AssistantStream } static createThreadAssistantStream( - body: ThreadCreateAndRunParamsBaseStream, + params: ThreadCreateAndRunParamsBaseStream, thread: Threads, options?: RequestOptions, ) { const runner = new AssistantStream(); runner._run(() => - runner._threadAssistantStream(body, thread, { + runner._threadAssistantStream(params, thread, { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, }), @@ -723,11 +723,11 @@ export class AssistantStream } protected async _threadAssistantStream( - body: ThreadCreateAndRunParamsBase, + params: ThreadCreateAndRunParamsBase, thread: Threads, options?: Core.RequestOptions, ): Promise { - return await this._createThreadAssistantStream(thread, body, options); + return await this._createThreadAssistantStream(thread, params, options); } protected async _runAssistantStream( diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts index c756919b0..8139c577b 100644 --- a/src/lib/ChatCompletionRunner.ts +++ b/src/lib/ChatCompletionRunner.ts @@ -1,5 +1,4 @@ import { - type Completions, type ChatCompletionMessageParam, type ChatCompletionCreateParamsNonStreaming, } from 'openai/resources/chat/completions'; @@ -10,6 +9,8 @@ 
import { RunnerOptions, } from './AbstractChatCompletionRunner'; import { isAssistantMessage } from './chatCompletionUtils'; +import OpenAI from 'openai/index'; +import { AutoParseableTool } from 'openai/lib/parser'; export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents { content: (content: string) => void; @@ -26,40 +27,43 @@ export type ChatCompletionToolRunnerParams & { - tools: RunnableTools; + tools: RunnableTools | AutoParseableTool[]; }; -export class ChatCompletionRunner extends AbstractChatCompletionRunner { +export class ChatCompletionRunner extends AbstractChatCompletionRunner< + ChatCompletionRunnerEvents, + ParsedT +> { /** @deprecated - please use `runTools` instead. */ static runFunctions( - completions: Completions, + client: OpenAI, params: ChatCompletionFunctionRunnerParams, options?: RunnerOptions, - ): ChatCompletionRunner { + ): ChatCompletionRunner { const runner = new ChatCompletionRunner(); const opts = { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runFunctions' }, }; - runner._run(() => runner._runFunctions(completions, params, opts)); + runner._run(() => runner._runFunctions(client, params, opts)); return runner; } - static runTools( - completions: Completions, + static runTools( + client: OpenAI, params: ChatCompletionToolRunnerParams, options?: RunnerOptions, - ): ChatCompletionRunner { - const runner = new ChatCompletionRunner(); + ): ChatCompletionRunner { + const runner = new ChatCompletionRunner(); const opts = { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runTools' }, }; - runner._run(() => runner._runTools(completions, params, opts)); + runner._run(() => runner._runTools(client, params, opts)); return runner; } - override _addMessage(this: ChatCompletionRunner, message: ChatCompletionMessageParam) { + override _addMessage(this: ChatCompletionRunner, message: ChatCompletionMessageParam) { super._addMessage(message); if 
(isAssistantMessage(message) && message.content) { this._emit('content', message.content as string); diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index 5eee904a2..e3661c8c1 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -1,10 +1,16 @@ import * as Core from 'openai/core'; -import { OpenAIError, APIUserAbortError } from 'openai/error'; import { - Completions, + OpenAIError, + APIUserAbortError, + LengthFinishReasonError, + ContentFilterFinishReasonError, +} from 'openai/error'; +import { + ChatCompletionTokenLogprob, type ChatCompletion, type ChatCompletionChunk, type ChatCompletionCreateParams, + type ChatCompletionCreateParamsStreaming, type ChatCompletionCreateParamsBase, } from 'openai/resources/chat/completions'; import { @@ -13,22 +19,125 @@ import { } from './AbstractChatCompletionRunner'; import { type ReadableStream } from 'openai/_shims/index'; import { Stream } from 'openai/streaming'; +import OpenAI from 'openai/index'; +import { ParsedChatCompletion } from 'openai/resources/beta/chat/completions'; +import { + AutoParseableResponseFormat, + hasAutoParseableInput, + isAutoParsableResponseFormat, + isAutoParsableTool, + maybeParseChatCompletion, + shouldParseToolCall, +} from 'openai/lib/parser'; +import { partialParse } from '../_vendor/partial-json-parser/parser'; + +export interface ContentDeltaEvent { + delta: string; + snapshot: string; + parsed: unknown | null; +} + +export interface ContentDoneEvent { + content: string; + parsed: ParsedT | null; +} + +export interface RefusalDeltaEvent { + delta: string; + snapshot: string; +} + +export interface RefusalDoneEvent { + refusal: string; +} + +export interface FunctionToolCallArgumentsDeltaEvent { + name: string; + + index: number; + + arguments: string; + + parsed_arguments: unknown; + + arguments_delta: string; +} + +export interface FunctionToolCallArgumentsDoneEvent { + name: string; + + index: number; + + arguments: string; 
-export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents { + parsed_arguments: unknown; +} + +export interface LogProbsContentDeltaEvent { + content: Array; + snapshot: Array; +} + +export interface LogProbsContentDoneEvent { + content: Array; +} + +export interface LogProbsRefusalDeltaEvent { + refusal: Array; + snapshot: Array; +} + +export interface LogProbsRefusalDoneEvent { + refusal: Array; +} + +export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents { content: (contentDelta: string, contentSnapshot: string) => void; chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void; + + 'content.delta': (props: ContentDeltaEvent) => void; + 'content.done': (props: ContentDoneEvent) => void; + + 'refusal.delta': (props: RefusalDeltaEvent) => void; + 'refusal.done': (props: RefusalDoneEvent) => void; + + 'tool_calls.function.arguments.delta': (props: FunctionToolCallArgumentsDeltaEvent) => void; + 'tool_calls.function.arguments.done': (props: FunctionToolCallArgumentsDoneEvent) => void; + + 'logprobs.content.delta': (props: LogProbsContentDeltaEvent) => void; + 'logprobs.content.done': (props: LogProbsContentDoneEvent) => void; + + 'logprobs.refusal.delta': (props: LogProbsRefusalDeltaEvent) => void; + 'logprobs.refusal.done': (props: LogProbsRefusalDoneEvent) => void; } export type ChatCompletionStreamParams = Omit & { stream?: true; }; -export class ChatCompletionStream - extends AbstractChatCompletionRunner +interface ChoiceEventState { + content_done: boolean; + refusal_done: boolean; + logprobs_content_done: boolean; + logprobs_refusal_done: boolean; + current_tool_call_index: number | null; + done_tool_calls: Set; +} + +export class ChatCompletionStream + extends AbstractChatCompletionRunner, ParsedT> implements AsyncIterable { + #params: ChatCompletionCreateParams | null; + #choiceEventStates: ChoiceEventState[]; #currentChatCompletionSnapshot: ChatCompletionSnapshot | undefined; 
+ constructor(params: ChatCompletionCreateParams | null) { + super(); + this.#params = params; + this.#choiceEventStates = []; + } + get currentChatCompletionSnapshot(): ChatCompletionSnapshot | undefined { return this.#currentChatCompletionSnapshot; } @@ -40,21 +149,21 @@ export class ChatCompletionStream * Note that messages sent to the model do not appear in `.on('message')` * in this context. */ - static fromReadableStream(stream: ReadableStream): ChatCompletionStream { - const runner = new ChatCompletionStream(); + static fromReadableStream(stream: ReadableStream): ChatCompletionStream { + const runner = new ChatCompletionStream(null); runner._run(() => runner._fromReadableStream(stream)); return runner; } - static createChatCompletion( - completions: Completions, + static createChatCompletion( + client: OpenAI, params: ChatCompletionStreamParams, options?: Core.RequestOptions, - ): ChatCompletionStream { - const runner = new ChatCompletionStream(); + ): ChatCompletionStream { + const runner = new ChatCompletionStream(params as ChatCompletionCreateParamsStreaming); runner._run(() => runner._runChatCompletion( - completions, + client, { ...params, stream: true }, { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } }, ), @@ -66,17 +175,184 @@ export class ChatCompletionStream if (this.ended) return; this.#currentChatCompletionSnapshot = undefined; } - #addChunk(this: ChatCompletionStream, chunk: ChatCompletionChunk) { + + #getChoiceEventState(choice: ChatCompletionSnapshot.Choice): ChoiceEventState { + let state = this.#choiceEventStates[choice.index]; + if (state) { + return state; + } + + state = { + content_done: false, + refusal_done: false, + logprobs_content_done: false, + logprobs_refusal_done: false, + done_tool_calls: new Set(), + current_tool_call_index: null, + }; + this.#choiceEventStates[choice.index] = state; + return state; + } + + #addChunk(this: ChatCompletionStream, chunk: ChatCompletionChunk) { if (this.ended) 
return; + const completion = this.#accumulateChatCompletion(chunk); this._emit('chunk', chunk, completion); - const delta = chunk.choices[0]?.delta?.content; - const snapshot = completion.choices[0]?.message; - if (delta != null && snapshot?.role === 'assistant' && snapshot?.content) { - this._emit('content', delta, snapshot.content); + + for (const choice of chunk.choices) { + const choiceSnapshot = completion.choices[choice.index]!; + + if ( + choice.delta.content != null && + choiceSnapshot.message?.role === 'assistant' && + choiceSnapshot.message?.content + ) { + this._emit('content', choice.delta.content, choiceSnapshot.message.content); + this._emit('content.delta', { + delta: choice.delta.content, + snapshot: choiceSnapshot.message.content, + parsed: choiceSnapshot.message.parsed, + }); + } + + if ( + choice.delta.refusal != null && + choiceSnapshot.message?.role === 'assistant' && + choiceSnapshot.message?.refusal + ) { + this._emit('refusal.delta', { + delta: choice.delta.refusal, + snapshot: choiceSnapshot.message.refusal, + }); + } + + if (choice.logprobs?.content != null && choiceSnapshot.message?.role === 'assistant') { + this._emit('logprobs.content.delta', { + content: choice.logprobs?.content, + snapshot: choiceSnapshot.logprobs?.content ?? [], + }); + } + + if (choice.logprobs?.refusal != null && choiceSnapshot.message?.role === 'assistant') { + this._emit('logprobs.refusal.delta', { + refusal: choice.logprobs?.refusal, + snapshot: choiceSnapshot.logprobs?.refusal ?? [], + }); + } + + const state = this.#getChoiceEventState(choiceSnapshot); + + if (choiceSnapshot.finish_reason) { + this.#emitContentDoneEvents(choiceSnapshot); + + if (state.current_tool_call_index != null) { + this.#emitToolCallDoneEvent(choiceSnapshot, state.current_tool_call_index); + } + } + + for (const toolCall of choice.delta.tool_calls ?? 
[]) { + if (state.current_tool_call_index !== toolCall.index) { + this.#emitContentDoneEvents(choiceSnapshot); + + // new tool call started, the previous one is done + if (state.current_tool_call_index != null) { + this.#emitToolCallDoneEvent(choiceSnapshot, state.current_tool_call_index); + } + } + + state.current_tool_call_index = toolCall.index; + } + + for (const toolCallDelta of choice.delta.tool_calls ?? []) { + const toolCallSnapshot = choiceSnapshot.message.tool_calls?.[toolCallDelta.index]; + if (!toolCallSnapshot?.type) { + continue; + } + + if (toolCallSnapshot?.type === 'function') { + this._emit('tool_calls.function.arguments.delta', { + name: toolCallSnapshot.function?.name, + index: toolCallDelta.index, + arguments: toolCallSnapshot.function.arguments, + parsed_arguments: toolCallSnapshot.function.parsed_arguments, + arguments_delta: toolCallDelta.function?.arguments ?? '', + }); + } else { + assertNever(toolCallSnapshot?.type); + } + } } } - #endRequest(): ChatCompletion { + + #emitToolCallDoneEvent(choiceSnapshot: ChatCompletionSnapshot.Choice, toolCallIndex: number) { + const state = this.#getChoiceEventState(choiceSnapshot); + if (state.done_tool_calls.has(toolCallIndex)) { + // we've already fired the done event + return; + } + + const toolCallSnapshot = choiceSnapshot.message.tool_calls?.[toolCallIndex]; + if (!toolCallSnapshot) { + throw new Error('no tool call snapshot'); + } + if (!toolCallSnapshot.type) { + throw new Error('tool call snapshot missing `type`'); + } + + if (toolCallSnapshot.type === 'function') { + const inputTool = this.#params?.tools?.find( + (tool) => tool.type === 'function' && tool.function.name === toolCallSnapshot.function.name, + ); + + this._emit('tool_calls.function.arguments.done', { + name: toolCallSnapshot.function.name, + index: toolCallIndex, + arguments: toolCallSnapshot.function.arguments, + parsed_arguments: + isAutoParsableTool(inputTool) ? 
inputTool.$parseRaw(toolCallSnapshot.function.arguments) + : inputTool?.function.strict ? JSON.parse(toolCallSnapshot.function.arguments) + : null, + }); + } else { + assertNever(toolCallSnapshot.type); + } + } + + #emitContentDoneEvents(choiceSnapshot: ChatCompletionSnapshot.Choice) { + const state = this.#getChoiceEventState(choiceSnapshot); + + if (choiceSnapshot.message.content && !state.content_done) { + state.content_done = true; + + const responseFormat = this.#getAutoParseableResponseFormat(); + + this._emit('content.done', { + content: choiceSnapshot.message.content, + parsed: responseFormat ? responseFormat.$parseRaw(choiceSnapshot.message.content) : (null as any), + }); + } + + if (choiceSnapshot.message.refusal && !state.refusal_done) { + state.refusal_done = true; + + this._emit('refusal.done', { refusal: choiceSnapshot.message.refusal }); + } + + if (choiceSnapshot.logprobs?.content && !state.logprobs_content_done) { + state.logprobs_content_done = true; + + this._emit('logprobs.content.done', { content: choiceSnapshot.logprobs.content }); + } + + if (choiceSnapshot.logprobs?.refusal && !state.logprobs_refusal_done) { + state.logprobs_refusal_done = true; + + this._emit('logprobs.refusal.done', { refusal: choiceSnapshot.logprobs.refusal }); + } + } + + #endRequest(): ParsedChatCompletion { if (this.ended) { throw new OpenAIError(`stream has ended, this shouldn't happen`); } @@ -85,21 +361,24 @@ export class ChatCompletionStream throw new OpenAIError(`request ended without sending any chunks`); } this.#currentChatCompletionSnapshot = undefined; - return finalizeChatCompletion(snapshot); + this.#choiceEventStates = []; + return finalizeChatCompletion(snapshot, this.#params); } protected override async _createChatCompletion( - completions: Completions, + client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions, - ): Promise { + ): Promise> { + super._createChatCompletion; const signal = options?.signal; if (signal) { if 
(signal.aborted) this.controller.abort(); signal.addEventListener('abort', () => this.controller.abort()); } this.#beginRequest(); - const stream = await completions.create( + + const stream = await client.chat.completions.create( { ...params, stream: true }, { ...options, signal: this.controller.signal }, ); @@ -141,6 +420,15 @@ export class ChatCompletionStream return this._addChatCompletion(this.#endRequest()); } + #getAutoParseableResponseFormat(): AutoParseableResponseFormat | null { + const responseFormat = this.#params?.response_format; + if (isAutoParsableResponseFormat(responseFormat)) { + return responseFormat; + } + + return null; + } + #accumulateChatCompletion(chunk: ChatCompletionChunk): ChatCompletionSnapshot { let snapshot = this.#currentChatCompletionSnapshot; const { choices, ...rest } = chunk; @@ -163,23 +451,48 @@ export class ChatCompletionStream if (!choice.logprobs) { choice.logprobs = Object.assign({}, logprobs); } else { - const { content, ...rest } = logprobs; + const { content, refusal, ...rest } = logprobs; + assertIsEmpty(rest); Object.assign(choice.logprobs, rest); + if (content) { choice.logprobs.content ??= []; choice.logprobs.content.push(...content); } + + if (refusal) { + choice.logprobs.refusal ??= []; + choice.logprobs.refusal.push(...refusal); + } + } + } + + if (finish_reason) { + choice.finish_reason = finish_reason; + + if (this.#params && hasAutoParseableInput(this.#params)) { + if (finish_reason === 'length') { + throw new LengthFinishReasonError(); + } + + if (finish_reason === 'content_filter') { + throw new ContentFilterFinishReasonError(); + } } } - if (finish_reason) choice.finish_reason = finish_reason; Object.assign(choice, other); if (!delta) continue; // Shouldn't happen; just in case. 
- const { content, function_call, role, tool_calls, ...rest } = delta; + + const { content, refusal, function_call, role, tool_calls, ...rest } = delta; + assertIsEmpty(rest); Object.assign(choice.message, rest); - if (content) choice.message.content = (choice.message.content || '') + content; + if (refusal) { + choice.message.refusal = (choice.message.refusal || '') + refusal; + } + if (role) choice.message.role = role; if (function_call) { if (!choice.message.function_call) { @@ -192,23 +505,39 @@ export class ChatCompletionStream } } } + if (content) { + choice.message.content = (choice.message.content || '') + content; + + if (!choice.message.refusal && this.#getAutoParseableResponseFormat()) { + choice.message.parsed = partialParse(choice.message.content); + } + } + if (tool_calls) { if (!choice.message.tool_calls) choice.message.tool_calls = []; + for (const { index, id, type, function: fn, ...rest } of tool_calls) { - const tool_call = (choice.message.tool_calls[index] ??= {}); + const tool_call = (choice.message.tool_calls[index] ??= + {} as ChatCompletionSnapshot.Choice.Message.ToolCall); Object.assign(tool_call, rest); if (id) tool_call.id = id; if (type) tool_call.type = type; - if (fn) tool_call.function ??= { arguments: '' }; + if (fn) tool_call.function ??= { name: fn.name ?? 
'', arguments: '' }; if (fn?.name) tool_call.function!.name = fn.name; - if (fn?.arguments) tool_call.function!.arguments += fn.arguments; + if (fn?.arguments) { + tool_call.function!.arguments += fn.arguments; + + if (shouldParseToolCall(this.#params, tool_call)) { + tool_call.function!.parsed_arguments = partialParse(tool_call.function!.arguments); + } + } } } } return snapshot; } - [Symbol.asyncIterator](this: ChatCompletionStream): AsyncIterator { + [Symbol.asyncIterator](this: ChatCompletionStream): AsyncIterator { const pushQueue: ChatCompletionChunk[] = []; const readQueue: { resolve: (chunk: ChatCompletionChunk | undefined) => void; @@ -275,29 +604,50 @@ export class ChatCompletionStream } } -function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletion { +function finalizeChatCompletion( + snapshot: ChatCompletionSnapshot, + params: ChatCompletionCreateParams | null, +): ParsedChatCompletion { const { id, choices, created, model, system_fingerprint, ...rest } = snapshot; - return { + const completion: ChatCompletion = { ...rest, id, choices: choices.map( ({ message, finish_reason, index, logprobs, ...choiceRest }): ChatCompletion.Choice => { - if (!finish_reason) throw new OpenAIError(`missing finish_reason for choice ${index}`); + if (!finish_reason) { + throw new OpenAIError(`missing finish_reason for choice ${index}`); + } + const { content = null, function_call, tool_calls, ...messageRest } = message; const role = message.role as 'assistant'; // this is what we expect; in theory it could be different which would make our types a slight lie but would be fine. 
- if (!role) throw new OpenAIError(`missing role for choice ${index}`); + if (!role) { + throw new OpenAIError(`missing role for choice ${index}`); + } + if (function_call) { const { arguments: args, name } = function_call; - if (args == null) throw new OpenAIError(`missing function_call.arguments for choice ${index}`); - if (!name) throw new OpenAIError(`missing function_call.name for choice ${index}`); + if (args == null) { + throw new OpenAIError(`missing function_call.arguments for choice ${index}`); + } + + if (!name) { + throw new OpenAIError(`missing function_call.name for choice ${index}`); + } + return { ...choiceRest, - message: { content, function_call: { arguments: args, name }, role }, + message: { + content, + function_call: { arguments: args, name }, + role, + refusal: message.refusal ?? null, + }, finish_reason, index, logprobs, }; } + if (tool_calls) { return { ...choiceRest, @@ -308,21 +658,26 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio ...messageRest, role, content, + refusal: message.refusal ?? 
null, tool_calls: tool_calls.map((tool_call, i) => { const { function: fn, type, id, ...toolRest } = tool_call; const { arguments: args, name, ...fnRest } = fn || {}; - if (id == null) + if (id == null) { throw new OpenAIError(`missing choices[${index}].tool_calls[${i}].id\n${str(snapshot)}`); - if (type == null) + } + if (type == null) { throw new OpenAIError(`missing choices[${index}].tool_calls[${i}].type\n${str(snapshot)}`); - if (name == null) + } + if (name == null) { throw new OpenAIError( `missing choices[${index}].tool_calls[${i}].function.name\n${str(snapshot)}`, ); - if (args == null) + } + if (args == null) { throw new OpenAIError( `missing choices[${index}].tool_calls[${i}].function.arguments\n${str(snapshot)}`, ); + } return { ...toolRest, id, type, function: { ...fnRest, name, arguments: args } }; }), @@ -331,7 +686,7 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio } return { ...choiceRest, - message: { ...messageRest, content, role }, + message: { ...messageRest, content, role, refusal: message.refusal ?? null }, finish_reason, index, logprobs, @@ -343,6 +698,8 @@ function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletio object: 'chat.completion', ...(system_fingerprint ? { system_fingerprint } : {}), }; + + return maybeParseChatCompletion(completion, params); } function str(x: unknown) { @@ -425,6 +782,10 @@ export namespace ChatCompletionSnapshot { */ content?: string | null; + refusal?: string | null; + + parsed?: unknown | null; + /** * The name and arguments of a function that should be called, as generated by the * model. @@ -444,14 +805,14 @@ export namespace ChatCompletionSnapshot { /** * The ID of the tool call. */ - id?: string; + id: string; - function?: ToolCall.Function; + function: ToolCall.Function; /** * The type of the tool. 
*/ - type?: 'function'; + type: 'function'; } export namespace ToolCall { @@ -462,12 +823,14 @@ export namespace ChatCompletionSnapshot { * hallucinate parameters not defined by your function schema. Validate the * arguments in your code before calling your function. */ - arguments?: string; + arguments: string; + + parsed_arguments?: unknown; /** * The name of the function to call. */ - name?: string; + name: string; } } @@ -492,3 +855,16 @@ export namespace ChatCompletionSnapshot { } } } + +type AssertIsEmpty = keyof T extends never ? T : never; + +/** + * Ensures the given argument is an empty object, useful for + * asserting that all known properties on an object have been + * destructured. + */ +function assertIsEmpty(obj: AssertIsEmpty): asserts obj is AssertIsEmpty { + return; +} + +function assertNever(_x: never) {} diff --git a/src/lib/ChatCompletionStreamingRunner.ts b/src/lib/ChatCompletionStreamingRunner.ts index cf58c5270..ea6c74116 100644 --- a/src/lib/ChatCompletionStreamingRunner.ts +++ b/src/lib/ChatCompletionStreamingRunner.ts @@ -1,5 +1,4 @@ import { - Completions, type ChatCompletionChunk, type ChatCompletionCreateParamsStreaming, } from 'openai/resources/chat/completions'; @@ -7,6 +6,8 @@ import { RunnerOptions, type AbstractChatCompletionRunnerEvents } from './Abstra import { type ReadableStream } from 'openai/_shims/index'; import { RunnableTools, type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction'; import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream'; +import OpenAI from 'openai/index'; +import { AutoParseableTool } from 'openai/lib/parser'; export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents { content: (contentDelta: string, contentSnapshot: string) => void; @@ -24,45 +25,48 @@ export type ChatCompletionStreamingToolRunnerParams & { - tools: RunnableTools; + tools: RunnableTools | AutoParseableTool[]; }; -export class ChatCompletionStreamingRunner - 
extends ChatCompletionStream +export class ChatCompletionStreamingRunner + extends ChatCompletionStream implements AsyncIterable { - static override fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner { - const runner = new ChatCompletionStreamingRunner(); + static override fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner { + const runner = new ChatCompletionStreamingRunner(null); runner._run(() => runner._fromReadableStream(stream)); return runner; } /** @deprecated - please use `runTools` instead. */ static runFunctions( - completions: Completions, + client: OpenAI, params: ChatCompletionStreamingFunctionRunnerParams, options?: RunnerOptions, - ): ChatCompletionStreamingRunner { - const runner = new ChatCompletionStreamingRunner(); + ): ChatCompletionStreamingRunner { + const runner = new ChatCompletionStreamingRunner(null); const opts = { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runFunctions' }, }; - runner._run(() => runner._runFunctions(completions, params, opts)); + runner._run(() => runner._runFunctions(client, params, opts)); return runner; } - static runTools( - completions: Completions, + static runTools( + client: OpenAI, params: ChatCompletionStreamingToolRunnerParams, options?: RunnerOptions, - ): ChatCompletionStreamingRunner { - const runner = new ChatCompletionStreamingRunner(); + ): ChatCompletionStreamingRunner { + const runner = new ChatCompletionStreamingRunner( + // @ts-expect-error TODO these types are incompatible + params, + ); const opts = { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runTools' }, }; - runner._run(() => runner._runTools(completions, params, opts)); + runner._run(() => runner._runTools(client, params, opts)); return runner; } } diff --git a/src/lib/RunnableFunction.ts b/src/lib/RunnableFunction.ts index 96ca06c86..a645f5ebe 100644 --- a/src/lib/RunnableFunction.ts +++ b/src/lib/RunnableFunction.ts @@ -12,7 +12,7 @@ 
export type RunnableFunctionWithParse = { */ function: ( args: Args, - runner: ChatCompletionRunner | ChatCompletionStreamingRunner, + runner: ChatCompletionRunner | ChatCompletionStreamingRunner, ) => PromiseOrValue; /** * @param input the raw args from the OpenAI function call. @@ -31,6 +31,7 @@ export type RunnableFunctionWithParse = { * The name of the function to be called. Will default to function.name if omitted. */ name?: string | undefined; + strict?: boolean | undefined; }; export type RunnableFunctionWithoutParse = { @@ -40,7 +41,7 @@ export type RunnableFunctionWithoutParse = { */ function: ( args: string, - runner: ChatCompletionRunner | ChatCompletionStreamingRunner, + runner: ChatCompletionRunner | ChatCompletionStreamingRunner, ) => PromiseOrValue; /** * The parameters the function accepts, describes as a JSON Schema object. @@ -54,6 +55,7 @@ export type RunnableFunctionWithoutParse = { * The name of the function to be called. Will default to function.name if omitted. */ name?: string | undefined; + strict?: boolean | undefined; }; export type RunnableFunction = diff --git a/src/lib/parser.ts b/src/lib/parser.ts new file mode 100644 index 000000000..8bf2a3a36 --- /dev/null +++ b/src/lib/parser.ts @@ -0,0 +1,235 @@ +import { + ChatCompletion, + ChatCompletionCreateParams, + ChatCompletionMessageToolCall, + ChatCompletionTool, +} from '../resources/chat/completions'; +import { + ChatCompletionStreamingToolRunnerParams, + ChatCompletionStreamParams, + ChatCompletionToolRunnerParams, + ParsedChatCompletion, + ParsedChoice, + ParsedFunctionToolCall, +} from '../resources/beta/chat/completions'; +import { ResponseFormatJSONSchema } from '../resources/shared'; +import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from 'openai/error'; + +type AnyChatCompletionCreateParams = + | ChatCompletionCreateParams + | ChatCompletionToolRunnerParams + | ChatCompletionStreamingToolRunnerParams + | ChatCompletionStreamParams; + +export type 
ExtractParsedContentFromParams = + Params['response_format'] extends AutoParseableResponseFormat ? P : null; + +export type AutoParseableResponseFormat = ResponseFormatJSONSchema & { + __output: ParsedT; // type-level only + + $brand: 'auto-parseable-response-format'; + $parseRaw(content: string): ParsedT; +}; + +export function makeParseableResponseFormat( + response_format: ResponseFormatJSONSchema, + parser: (content: string) => ParsedT, +): AutoParseableResponseFormat { + const obj = { ...response_format }; + + Object.defineProperties(obj, { + $brand: { + value: 'auto-parseable-response-format', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + }); + + return obj as AutoParseableResponseFormat; +} + +export function isAutoParsableResponseFormat( + response_format: any, +): response_format is AutoParseableResponseFormat { + return response_format?.['$brand'] === 'auto-parseable-response-format'; +} + +type ToolOptions = { + name: string; + arguments: any; + function?: ((args: any) => any) | undefined; +}; + +export type AutoParseableTool< + OptionsT extends ToolOptions, + HasFunction = OptionsT['function'] extends Function ? 
true : false, +> = ChatCompletionTool & { + __arguments: OptionsT['arguments']; // type-level only + __name: OptionsT['name']; // type-level only + __hasFunction: HasFunction; // type-level only + + $brand: 'auto-parseable-tool'; + $callback: ((args: OptionsT['arguments']) => any) | undefined; + $parseRaw(args: string): OptionsT['arguments']; +}; + +export function makeParseableTool( + tool: ChatCompletionTool, + { + parser, + callback, + }: { + parser: (content: string) => OptionsT['arguments']; + callback: ((args: any) => any) | undefined; + }, +): AutoParseableTool { + const obj = { ...tool }; + + Object.defineProperties(obj, { + $brand: { + value: 'auto-parseable-tool', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + $callback: { + value: callback, + enumerable: false, + }, + }); + + return obj as AutoParseableTool; +} + +export function isAutoParsableTool(tool: any): tool is AutoParseableTool { + return tool?.['$brand'] === 'auto-parseable-tool'; +} + +export function maybeParseChatCompletion< + Params extends ChatCompletionCreateParams | null, + ParsedT = Params extends null ? null : ExtractParsedContentFromParams>, +>(completion: ChatCompletion, params: Params): ParsedChatCompletion { + if (!params || !hasAutoParseableInput(params)) { + return { + ...completion, + choices: completion.choices.map((choice) => ({ + ...choice, + message: { ...choice.message, parsed: null, tool_calls: choice.message.tool_calls ?? 
[] }, + })), + }; + } + + return parseChatCompletion(completion, params); +} + +export function parseChatCompletion< + Params extends ChatCompletionCreateParams, + ParsedT = ExtractParsedContentFromParams, +>(completion: ChatCompletion, params: Params): ParsedChatCompletion { + const choices: Array> = completion.choices.map((choice): ParsedChoice => { + if (choice.finish_reason === 'length') { + throw new LengthFinishReasonError(); + } + + if (choice.finish_reason === 'content_filter') { + throw new ContentFilterFinishReasonError(); + } + + return { + ...choice, + message: { + ...choice.message, + tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall(params, toolCall)) ?? [], + parsed: + choice.message.content && !choice.message.refusal ? + parseResponseFormat(params, choice.message.content) + : null, + }, + }; + }); + + return { ...completion, choices }; +} + +function parseResponseFormat< + Params extends ChatCompletionCreateParams, + ParsedT = ExtractParsedContentFromParams, +>(params: Params, content: string): ParsedT | null { + if (params.response_format?.type !== 'json_schema') { + return null; + } + + if (params.response_format?.type === 'json_schema') { + if ('$parseRaw' in params.response_format) { + const response_format = params.response_format as AutoParseableResponseFormat; + + return response_format.$parseRaw(content); + } + + return JSON.parse(content); + } + + return null; +} + +function parseToolCall( + params: Params, + toolCall: ChatCompletionMessageToolCall, +): ParsedFunctionToolCall { + const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name); + return { + ...toolCall, + function: { + ...toolCall.function, + parsed_arguments: + isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments) + : inputTool?.function.strict ? 
JSON.parse(toolCall.function.arguments) + : null, + }, + }; +} + +export function shouldParseToolCall( + params: ChatCompletionCreateParams | null | undefined, + toolCall: ChatCompletionMessageToolCall, +): boolean { + if (!params) { + return false; + } + + const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name); + return isAutoParsableTool(inputTool) || inputTool?.function.strict || false; +} + +export function hasAutoParseableInput(params: AnyChatCompletionCreateParams): boolean { + if (isAutoParsableResponseFormat(params.response_format)) { + return true; + } + + return ( + params.tools?.some( + (t) => isAutoParsableTool(t) || (t.type === 'function' && t.function.strict === true), + ) ?? false + ); +} + +export function validateInputTools(tools: ChatCompletionTool[] | undefined) { + for (const tool of tools ?? []) { + if (tool.type !== 'function') { + throw new OpenAIError( + `Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``, + ); + } + + if (tool.function.strict !== true) { + throw new OpenAIError( + `The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`, + ); + } + } +} diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index d66b03768..8d07e45b0 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -149,6 +149,11 @@ export interface Assistant { * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which guarantees the model will match your supplied JSON schema. Learn + * more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). 
+ * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. * @@ -648,8 +653,8 @@ export namespace FileSearchTool { export interface FileSearch { /** * The maximum number of results the file search tool should output. The default is - * 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1 - * and 50 inclusive. + * 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between + * 1 and 50 inclusive. * * Note that the file search tool may output fewer than `max_num_results` results. * See the @@ -1086,6 +1091,11 @@ export interface AssistantCreateParams { * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which guarantees the model will match your supplied JSON schema. Learn + * more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. * @@ -1278,6 +1288,11 @@ export interface AssistantUpdateParams { * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which guarantees the model will match your supplied JSON schema. Learn + * more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. 
* diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index cefe66824..4993d02fb 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -39,7 +39,6 @@ export namespace Beta { export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; export import AssistantListParams = AssistantsAPI.AssistantListParams; export import Threads = ThreadsAPI.Threads; - export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat; export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index e002b6344..96c4118bf 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -26,35 +26,82 @@ export { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunne import { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; export { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; import { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; +import { + ChatCompletion, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionMessage, + ChatCompletionMessageToolCall, +} from '../../chat/completions'; +import { ExtractParsedContentFromParams, parseChatCompletion, validateInputTools } from '../../../lib/parser'; export { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; +export interface ParsedFunction extends ChatCompletionMessageToolCall.Function { + parsed_arguments?: unknown; +} + +export interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall { + function: ParsedFunction; +} + +export interface 
ParsedChatCompletionMessage extends ChatCompletionMessage { + parsed: ParsedT | null; + tool_calls: Array; +} + +export interface ParsedChoice extends ChatCompletion.Choice { + message: ParsedChatCompletionMessage; +} + +export interface ParsedChatCompletion extends ChatCompletion { + choices: Array>; +} + +export type ChatCompletionParseParams = ChatCompletionCreateParamsNonStreaming; + export class Completions extends APIResource { + async parse>( + body: Params, + options?: Core.RequestOptions, + ): Promise> { + validateInputTools(body.tools); + + const completion = await this._client.chat.completions.create(body, { + ...options, + headers: { + ...options?.headers, + 'X-Stainless-Helper-Method': 'beta.chat.completions.parse', + }, + }); + + return parseChatCompletion(completion, body); + } + /** * @deprecated - use `runTools` instead. */ runFunctions( body: ChatCompletionFunctionRunnerParams, options?: Core.RequestOptions, - ): ChatCompletionRunner; + ): ChatCompletionRunner; runFunctions( body: ChatCompletionStreamingFunctionRunnerParams, options?: Core.RequestOptions, - ): ChatCompletionStreamingRunner; + ): ChatCompletionStreamingRunner; runFunctions( body: | ChatCompletionFunctionRunnerParams | ChatCompletionStreamingFunctionRunnerParams, options?: Core.RequestOptions, - ): ChatCompletionRunner | ChatCompletionStreamingRunner { + ): ChatCompletionRunner | ChatCompletionStreamingRunner { if (body.stream) { return ChatCompletionStreamingRunner.runFunctions( - this._client.chat.completions, + this._client, body as ChatCompletionStreamingFunctionRunnerParams, options, ); } return ChatCompletionRunner.runFunctions( - this._client.chat.completions, + this._client, body as ChatCompletionFunctionRunnerParams, options, ); @@ -69,38 +116,41 @@ export class Completions extends APIResource { * For more details and examples, see * [the docs](https://github.com/openai/openai-node#automated-function-calls) */ - runTools( - body: ChatCompletionToolRunnerParams, - options?: 
Core.RequestOptions, - ): ChatCompletionRunner; - runTools( - body: ChatCompletionStreamingToolRunnerParams, - options?: Core.RequestOptions, - ): ChatCompletionStreamingRunner; - runTools( - body: - | ChatCompletionToolRunnerParams - | ChatCompletionStreamingToolRunnerParams, + runTools< + Params extends ChatCompletionToolRunnerParams, + ParsedT = ExtractParsedContentFromParams, + >(body: Params, options?: Core.RequestOptions): ChatCompletionRunner; + + runTools< + Params extends ChatCompletionStreamingToolRunnerParams, + ParsedT = ExtractParsedContentFromParams, + >(body: Params, options?: Core.RequestOptions): ChatCompletionStreamingRunner; + + runTools< + Params extends ChatCompletionToolRunnerParams | ChatCompletionStreamingToolRunnerParams, + ParsedT = ExtractParsedContentFromParams, + >( + body: Params, options?: Core.RequestOptions, - ): ChatCompletionRunner | ChatCompletionStreamingRunner { + ): ChatCompletionRunner | ChatCompletionStreamingRunner { if (body.stream) { return ChatCompletionStreamingRunner.runTools( - this._client.chat.completions, - body as ChatCompletionStreamingToolRunnerParams, + this._client, + body as ChatCompletionStreamingToolRunnerParams, options, ); } - return ChatCompletionRunner.runTools( - this._client.chat.completions, - body as ChatCompletionToolRunnerParams, - options, - ); + + return ChatCompletionRunner.runTools(this._client, body as ChatCompletionToolRunnerParams, options); } /** * Creates a chat completion stream */ - stream(body: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream { - return ChatCompletionStream.createChatCompletion(this._client.chat.completions, body, options); + stream>( + body: Params, + options?: Core.RequestOptions, + ): ChatCompletionStream { + return ChatCompletionStream.createChatCompletion(this._client, body, options); } } diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 029cd084c..392be1f35 100644 --- a/src/resources/beta/index.ts +++ 
b/src/resources/beta/index.ts @@ -19,7 +19,6 @@ export { Assistants, } from './assistants'; export { - AssistantResponseFormat, AssistantResponseFormatOption, AssistantToolChoice, AssistantToolChoiceFunction, diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index b55f67edf..1964cffb8 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -22,6 +22,8 @@ export { MessageDeleted, MessageDelta, MessageDeltaEvent, + RefusalContentBlock, + RefusalDeltaBlock, Text, TextContentBlock, TextContentBlockParam, @@ -34,7 +36,6 @@ export { Messages, } from './messages'; export { - AssistantResponseFormat, AssistantResponseFormatOption, AssistantToolChoice, AssistantToolChoiceFunction, diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index db58f45b8..59c92675b 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -481,13 +481,21 @@ export namespace Message { * References an image [File](https://platform.openai.com/docs/api-reference/files) * in the content of a message. */ -export type MessageContent = ImageFileContentBlock | ImageURLContentBlock | TextContentBlock; +export type MessageContent = + | ImageFileContentBlock + | ImageURLContentBlock + | TextContentBlock + | RefusalContentBlock; /** * References an image [File](https://platform.openai.com/docs/api-reference/files) * in the content of a message. */ -export type MessageContentDelta = ImageFileDeltaBlock | TextDeltaBlock | ImageURLDeltaBlock; +export type MessageContentDelta = + | ImageFileDeltaBlock + | TextDeltaBlock + | RefusalDeltaBlock + | ImageURLDeltaBlock; /** * References an image [File](https://platform.openai.com/docs/api-reference/files) @@ -539,6 +547,35 @@ export interface MessageDeltaEvent { object: 'thread.message.delta'; } +/** + * The refusal content generated by the assistant. 
+ */ +export interface RefusalContentBlock { + refusal: string; + + /** + * Always `refusal`. + */ + type: 'refusal'; +} + +/** + * The refusal content that is part of a message. + */ +export interface RefusalDeltaBlock { + /** + * The index of the refusal part in the message. + */ + index: number; + + /** + * Always `refusal`. + */ + type: 'refusal'; + + refusal?: string; +} + export interface Text { annotations: Array; @@ -707,6 +744,8 @@ export namespace Messages { export import MessageDeleted = MessagesAPI.MessageDeleted; export import MessageDelta = MessagesAPI.MessageDelta; export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; + export import RefusalContentBlock = MessagesAPI.RefusalContentBlock; + export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock; export import Text = MessagesAPI.Text; export import TextContentBlock = MessagesAPI.TextContentBlock; export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index db9827616..9383e70cc 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -422,6 +422,11 @@ export interface Run { * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which guarantees the model will match your supplied JSON schema. Learn + * more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. * @@ -684,6 +689,11 @@ export interface RunCreateParamsBase { * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
* + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which guarantees the model will match your supplied JSON schema. Learn + * more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. * diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 0b931a911..0ba3b4dd2 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -6,6 +6,7 @@ import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from '../../../li import { APIPromise } from '../../../core'; import * as Core from '../../../core'; import * as ThreadsAPI from './threads'; +import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; @@ -118,15 +119,16 @@ export class Threads extends APIResource { } /** - * An object describing the expected output of the model. If `json_object` only - * `function` type `tools` are allowed to be passed to the Run. If `text` the model - * can return text or any value needed. +<<<<<<< HEAD + * An object describing the expected output of the model. If `json_object` or + * `json_schema`, only `function` type `tools` are allowed to be passed to the Run. + * If `text` the model can return text or any value needed. */ export interface AssistantResponseFormat { /** - * Must be one of `text` or `json_object`. + * Must be one of `text`, `json_object` or `json_schema`. */ - type?: 'text' | 'json_object'; + type?: 'text' | 'json_object' | 'json_schema'; } /** @@ -135,6 +137,11 @@ export interface AssistantResponseFormat { * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
* + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which guarantees the model will match your supplied JSON schema. Learn + * more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. * @@ -146,7 +153,11 @@ export interface AssistantResponseFormat { * indicates the generation exceeded `max_tokens` or the conversation exceeded the * max context length. */ -export type AssistantResponseFormatOption = 'none' | 'auto' | AssistantResponseFormat; +export type AssistantResponseFormatOption = + | 'auto' + | Shared.ResponseFormatText + | Shared.ResponseFormatJSONObject + | Shared.ResponseFormatJSONSchema; /** * Specifies a tool the model should use. Use to force the model to call a specific @@ -561,6 +572,11 @@ export interface ThreadCreateAndRunParamsBase { * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which guarantees the model will match your supplied JSON schema. Learn + * more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. 
* @@ -1564,7 +1580,6 @@ export namespace ThreadCreateAndRunStreamParams { } export namespace Threads { - export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat; export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; @@ -1618,6 +1633,8 @@ export namespace Threads { export import MessageDeleted = MessagesAPI.MessageDeleted; export import MessageDelta = MessagesAPI.MessageDelta; export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; + export import RefusalContentBlock = MessagesAPI.RefusalContentBlock; + export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock; export import Text = MessagesAPI.Text; export import TextContentBlock = MessagesAPI.TextContentBlock; export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index 594c51970..c0f695223 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -232,7 +232,7 @@ export namespace VectorStoreFile { /** * One of `server_error` or `rate_limit_exceeded`. */ - code: 'internal_error' | 'file_not_found' | 'parsing_error' | 'unhandled_mime_type'; + code: 'server_error' | 'unsupported_file' | 'invalid_file'; /** * A human-readable description of the error. 
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 74cda326e..031b4059b 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -10,6 +10,7 @@ export class Chat extends APIResource { export type ChatModel = | 'gpt-4o' + | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' @@ -41,6 +42,7 @@ export namespace Chat { export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk; export import ChatCompletionContentPart = CompletionsAPI.ChatCompletionContentPart; export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage; + export import ChatCompletionContentPartRefusal = CompletionsAPI.ChatCompletionContentPartRefusal; export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption; export import ChatCompletionFunctionMessageParam = CompletionsAPI.ChatCompletionFunctionMessageParam; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 4027e995b..91d7da801 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -123,6 +123,11 @@ export namespace ChatCompletion { * A list of message content tokens with log probability information. */ content: Array | null; + + /** + * A list of message refusal tokens with log probability information. + */ + refusal: Array | null; } } } @@ -137,7 +142,7 @@ export interface ChatCompletionAssistantMessageParam { * The contents of the assistant message. Required unless `tool_calls` or * `function_call` is specified. */ - content?: string | null; + content?: string | Array | null; /** * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of @@ -151,6 +156,11 @@ export interface ChatCompletionAssistantMessageParam { */ name?: string; + /** + * The refusal message by the assistant. 
+ */ + refusal?: string | null; + /** * The tool calls generated by the model, such as function calls. */ @@ -277,6 +287,11 @@ export namespace ChatCompletionChunk { */ function_call?: Delta.FunctionCall; + /** + * The refusal message generated by the model. + */ + refusal?: string | null; + /** * The role of the author of this message. */ @@ -347,6 +362,11 @@ export namespace ChatCompletionChunk { * A list of message content tokens with log probability information. */ content: Array | null; + + /** + * A list of message refusal tokens with log probability information. + */ + refusal: Array | null; } } } @@ -377,6 +397,18 @@ export namespace ChatCompletionContentPartImage { } } +export interface ChatCompletionContentPartRefusal { + /** + * The refusal message generated by the model. + */ + refusal: string; + + /** + * The type of the content part. + */ + type: 'refusal'; +} + export interface ChatCompletionContentPartText { /** * The text content. @@ -429,6 +461,11 @@ export interface ChatCompletionMessage { */ content: string | null; + /** + * The refusal message generated by the model. + */ + refusal: string | null; + /** * The role of the author of this message. */ @@ -555,7 +592,7 @@ export interface ChatCompletionSystemMessageParam { /** * The contents of the system message. */ - content: string; + content: string | Array; /** * The role of the messages author, in this case `system`. @@ -648,7 +685,7 @@ export interface ChatCompletionToolMessageParam { /** * The contents of the tool message. */ - content: string; + content: string | Array; /** * The role of the messages author, in this case `tool`. @@ -787,6 +824,8 @@ export interface ChatCompletionCreateParamsBase { /** * An object specifying the format that the model must output. 
Compatible with + * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), + * [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. * @@ -801,7 +840,10 @@ export interface ChatCompletionCreateParamsBase { * indicates the generation exceeded `max_tokens` or the conversation exceeded the * max context length. */ - response_format?: ChatCompletionCreateParams.ResponseFormat; + response_format?: + | Shared.ResponseFormatText + | Shared.ResponseFormatJSONObject + | Shared.ResponseFormatJSONSchema; /** * This feature is in Beta. If specified, our system will make a best effort to @@ -929,29 +971,6 @@ export namespace ChatCompletionCreateParams { parameters?: Shared.FunctionParameters; } - /** - * An object specifying the format that the model must output. Compatible with - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - * message the model generates is valid JSON. - * - * **Important:** when using JSON mode, you **must** also instruct the model to - * produce JSON yourself via a system or user message. Without this, the model may - * generate an unending stream of whitespace until the generation reaches the token - * limit, resulting in a long-running and seemingly "stuck" request. Also note that - * the message content may be partially cut off if `finish_reason="length"`, which - * indicates the generation exceeded `max_tokens` or the conversation exceeded the - * max context length. - */ - export interface ResponseFormat { - /** - * Must be one of `text` or `json_object`. 
- */ - type?: 'text' | 'json_object'; - } - export type ChatCompletionCreateParamsNonStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming; export type ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming; @@ -1002,6 +1021,7 @@ export namespace Completions { export import ChatCompletionChunk = ChatCompletionsAPI.ChatCompletionChunk; export import ChatCompletionContentPart = ChatCompletionsAPI.ChatCompletionContentPart; export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage; + export import ChatCompletionContentPartRefusal = ChatCompletionsAPI.ChatCompletionContentPartRefusal; export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption; export import ChatCompletionFunctionMessageParam = ChatCompletionsAPI.ChatCompletionFunctionMessageParam; diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 2761385c2..748770948 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -6,6 +6,7 @@ export { ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, + ChatCompletionContentPartRefusal, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index c4aae364a..aeb646279 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -299,9 +299,9 @@ export interface FineTuningJobWandbIntegrationObject { export interface JobCreateParams { /** * The name of the model to fine-tune. You can select one of the - * [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). 
+ * [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). */ - model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo'; + model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo' | 'gpt-4o-mini'; /** * The ID of an uploaded file that contains training data. @@ -344,7 +344,7 @@ export interface JobCreateParams { * name. * * For example, a `suffix` of "custom-model-name" would produce a model name like - * `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + * `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. */ suffix?: string | null; diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 45969ea65..f44fda8a7 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -33,6 +33,15 @@ export interface FunctionDefinition { * Omitting `parameters` defines a function with an empty parameter list. */ parameters?: FunctionParameters; + + /** + * Whether to enable strict schema adherence when generating the function call. If + * set to true, the model will follow the exact schema defined in the `parameters` + * field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn + * more about Structured Outputs in the + * [function calling guide](docs/guides/function-calling). + */ + strict?: boolean | null; } /** @@ -45,3 +54,56 @@ export interface FunctionDefinition { * Omitting `parameters` defines a function with an empty parameter list. */ export type FunctionParameters = Record; + +export interface ResponseFormatJSONObject { + /** + * The type of response format being defined: `json_object` + */ + type: 'json_object'; +} + +export interface ResponseFormatJSONSchema { + json_schema: ResponseFormatJSONSchema.JSONSchema; + + /** + * The type of response format being defined: `json_schema` + */ + type: 'json_schema'; +} + +export namespace ResponseFormatJSONSchema { + export interface JSONSchema { + /** + * The name of the response format. 
Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name: string; + + /** + * A description of what the response format is for, used by the model to determine + * how to respond in the format. + */ + description?: string; + + /** + * The schema for the response format, described as a JSON Schema object. + */ + schema?: Record; + + /** + * Whether to enable strict schema adherence when generating the output. If set to + * true, the model will always follow the exact schema defined in the `schema` + * field. Only a subset of JSON Schema is supported when `strict` is `true`. To + * learn more, read the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + */ + strict?: boolean | null; + } +} + +export interface ResponseFormatText { + /** + * The type of response format being defined: `text` + */ + type: 'text'; +} diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index 657cd76a6..13fec377d 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -10,7 +10,7 @@ const client = new OpenAI({ describe('resource assistants', () => { test('create: only required params', async () => { - const responsePromise = client.beta.assistants.create({ model: 'gpt-4-turbo' }); + const responsePromise = client.beta.assistants.create({ model: 'gpt-4o' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -22,12 +22,12 @@ describe('resource assistants', () => { test('create: required and optional params', async () => { const response = await client.beta.assistants.create({ - model: 'gpt-4-turbo', + model: 'gpt-4o', description: 'description', instructions: 'instructions', metadata: {}, name: 'name', - response_format: 'none', + response_format: 'auto', temperature: 1, tool_resources: { code_interpreter: { 
file_ids: ['string', 'string', 'string'] }, diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 856eb8662..f6a7dead6 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -123,9 +123,9 @@ describe('resource runs', () => { max_completion_tokens: 256, max_prompt_tokens: 256, metadata: {}, - model: 'gpt-4-turbo', + model: 'gpt-4o', parallel_tool_calls: true, - response_format: 'none', + response_format: 'auto', stream: false, temperature: 1, tool_choice: 'none', diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index 2a5ebfd82..abf631adb 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -212,9 +212,9 @@ describe('resource threads', () => { max_completion_tokens: 256, max_prompt_tokens: 256, metadata: {}, - model: 'gpt-4-turbo', + model: 'gpt-4o', parallel_tool_calls: true, - response_format: 'none', + response_format: 'auto', stream: false, temperature: 1, thread: { diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 78314074f..5cdd1e670 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -11,8 +11,8 @@ const client = new OpenAI({ describe('resource completions', () => { test('create: only required params', async () => { const responsePromise = client.chat.completions.create({ - messages: [{ content: 'content', role: 'system' }], - model: 'gpt-4-turbo', + messages: [{ content: 'string', role: 'system' }], + model: 'gpt-4o', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -25,8 +25,8 @@ describe('resource completions', () => { test('create: required and optional params', async () => { const response = await 
client.chat.completions.create({ - messages: [{ content: 'content', role: 'system', name: 'name' }], - model: 'gpt-4-turbo', + messages: [{ content: 'string', role: 'system', name: 'name' }], + model: 'gpt-4o', frequency_penalty: -2, function_call: 'none', functions: [{ description: 'description', name: 'name', parameters: { foo: 'bar' } }], @@ -36,7 +36,7 @@ describe('resource completions', () => { n: 1, parallel_tool_calls: true, presence_penalty: -2, - response_format: { type: 'json_object' }, + response_format: { type: 'text' }, seed: -9007199254740991, service_tier: 'auto', stop: 'string', @@ -47,15 +47,15 @@ describe('resource completions', () => { tools: [ { type: 'function', - function: { description: 'description', name: 'name', parameters: { foo: 'bar' } }, + function: { description: 'description', name: 'name', parameters: { foo: 'bar' }, strict: true }, }, { type: 'function', - function: { description: 'description', name: 'name', parameters: { foo: 'bar' } }, + function: { description: 'description', name: 'name', parameters: { foo: 'bar' }, strict: true }, }, { type: 'function', - function: { description: 'description', name: 'name', parameters: { foo: 'bar' } }, + function: { description: 'description', name: 'name', parameters: { foo: 'bar' }, strict: true }, }, ], top_logprobs: 0, diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index 04de7ee21..e683dfe3e 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -11,7 +11,7 @@ const client = new OpenAI({ describe('resource jobs', () => { test('create: only required params', async () => { const responsePromise = client.fineTuning.jobs.create({ - model: 'gpt-3.5-turbo', + model: 'gpt-4o-mini', training_file: 'file-abc123', }); const rawResponse = await responsePromise.asResponse(); @@ -25,7 +25,7 @@ describe('resource jobs', () => { test('create: required and optional 
params', async () => { const response = await client.fineTuning.jobs.create({ - model: 'gpt-3.5-turbo', + model: 'gpt-4o-mini', training_file: 'file-abc123', hyperparameters: { batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' }, integrations: [ diff --git a/tests/api-resources/models.test.ts b/tests/api-resources/models.test.ts index eee91d020..23ebd1bb6 100644 --- a/tests/api-resources/models.test.ts +++ b/tests/api-resources/models.test.ts @@ -10,7 +10,7 @@ const client = new OpenAI({ describe('resource models', () => { test('retrieve', async () => { - const responsePromise = client.models.retrieve('gpt-3.5-turbo'); + const responsePromise = client.models.retrieve('gpt-4o-mini'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -22,9 +22,9 @@ describe('resource models', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.models.retrieve('gpt-3.5-turbo', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(OpenAI.NotFoundError); + await expect(client.models.retrieve('gpt-4o-mini', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); }); test('list', async () => { @@ -46,7 +46,7 @@ describe('resource models', () => { }); test('del', async () => { - const responsePromise = client.models.del('ft:gpt-3.5-turbo:acemeco:suffix:abc123'); + const responsePromise = client.models.del('ft:gpt-4o-mini:acemeco:suffix:abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -59,7 +59,7 @@ describe('resource models', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being 
passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.models.del('ft:gpt-3.5-turbo:acemeco:suffix:abc123', { path: '/_stainless_unknown_path' }), + client.models.del('ft:gpt-4o-mini:acemeco:suffix:abc123', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/helpers/zod.test.ts b/tests/helpers/zod.test.ts new file mode 100644 index 000000000..1ad4b7475 --- /dev/null +++ b/tests/helpers/zod.test.ts @@ -0,0 +1,269 @@ +import { zodResponseFormat } from 'openai/helpers/zod'; +import { z } from 'zod'; + +describe('zodResponseFormat', () => { + it('does the thing', () => { + expect( + zodResponseFormat( + z.object({ + city: z.string(), + temperature: z.number(), + units: z.enum(['c', 'f']), + }), + 'location', + ).json_schema, + ).toMatchInlineSnapshot(` + { + "name": "location", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "city": { + "type": "string", + }, + "temperature": { + "type": "number", + }, + "units": { + "enum": [ + "c", + "f", + ], + "type": "string", + }, + }, + "required": [ + "city", + "temperature", + "units", + ], + "type": "object", + }, + "strict": true, + } + `); + }); + + it('automatically adds optional properties to `required`', () => { + expect( + zodResponseFormat( + z.object({ + city: z.string(), + temperature: z.number(), + units: z.enum(['c', 'f']).optional(), + }), + 'location', + ).json_schema, + ).toMatchInlineSnapshot(` + { + "name": "location", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "city": { + "type": "string", + }, + "temperature": { + "type": "number", + }, + "units": { + "enum": [ + "c", + "f", + ], + "type": "string", + }, + }, + "required": [ + "city", + "temperature", + "units", + ], + "type": "object", + }, + "strict": true, + } + `); + }); + + 
it('automatically adds properties with defaults to `required`', () => { + expect( + zodResponseFormat( + z.object({ + city: z.string(), + temperature: z.number(), + units: z.enum(['c', 'f']).default('c'), + }), + 'location', + ).json_schema, + ).toMatchInlineSnapshot(` + { + "name": "location", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "city": { + "type": "string", + }, + "temperature": { + "type": "number", + }, + "units": { + "default": "c", + "enum": [ + "c", + "f", + ], + "type": "string", + }, + }, + "required": [ + "city", + "temperature", + "units", + ], + "type": "object", + }, + "strict": true, + } + `); + }); + + test('kitchen sink types', () => { + const Table = z.enum(['orders', 'customers', 'products']); + + const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', + ]); + + const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); + + const OrderBy = z.enum(['asc', 'desc']); + + const DynamicValue = z.object({ + column_name: z.string(), + }); + + const Condition = z.object({ + column: z.string(), + operator: Operator, + value: z.union([z.string(), z.number(), DynamicValue]), + }); + + const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, + }); + + expect(zodResponseFormat(Query, 'query').json_schema).toMatchInlineSnapshot(` + { + "name": "query", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "properties": { + "columns": { + "items": { + "enum": [ + "id", + "status", + "expected_delivery_date", + "delivered_at", + "shipped_at", + "ordered_at", + "canceled_at", + ], + "type": "string", + }, + "type": "array", + }, + "conditions": { + "items": { + "additionalProperties": false, + "properties": { + "column": { + "type": "string", + }, + 
"operator": { + "enum": [ + "=", + ">", + "<", + "<=", + ">=", + "!=", + ], + "type": "string", + }, + "value": { + "anyOf": [ + { + "type": "string", + }, + { + "type": "number", + }, + { + "additionalProperties": false, + "properties": { + "column_name": { + "type": "string", + }, + }, + "required": [ + "column_name", + ], + "type": "object", + }, + ], + }, + }, + "required": [ + "column", + "operator", + "value", + ], + "type": "object", + }, + "type": "array", + }, + "order_by": { + "enum": [ + "asc", + "desc", + ], + "type": "string", + }, + "table_name": { + "enum": [ + "orders", + "customers", + "products", + ], + "type": "string", + }, + }, + "required": [ + "table_name", + "columns", + "conditions", + "order_by", + ], + "type": "object", + }, + "strict": true, + } + `); + }); +}); diff --git a/src/lib/ChatCompletionRunFunctions.test.ts b/tests/lib/ChatCompletionRunFunctions.test.ts similarity index 91% rename from src/lib/ChatCompletionRunFunctions.test.ts rename to tests/lib/ChatCompletionRunFunctions.test.ts index b524218ae..cddfe4a5f 100644 --- a/src/lib/ChatCompletionRunFunctions.test.ts +++ b/tests/lib/ChatCompletionRunFunctions.test.ts @@ -9,76 +9,9 @@ import { type ChatCompletionStreamingFunctionRunnerParams, } from 'openai/resources/beta/chat/completions'; import type { ChatCompletionMessageParam } from 'openai/resources/chat/completions'; - -import { type RequestInfo, type RequestInit } from 'openai/_shims/index'; import { Response } from 'node-fetch'; -import { isAssistantMessage } from './chatCompletionUtils'; - -type Fetch = (req: string | RequestInfo, init?: RequestInit) => Promise; - -/** - * Creates a mock `fetch` function and a `handleRequest` function for intercepting `fetch` calls. - * - * You call `handleRequest` with a callback function that handles the next `fetch` call. 
- * It returns a Promise that: - * - waits for the next call to `fetch` - * - calls the callback with the `fetch` arguments - * - resolves `fetch` with the callback output - */ -function mockFetch(): { fetch: Fetch; handleRequest: (handle: Fetch) => Promise } { - const fetchQueue: ((handler: typeof fetch) => void)[] = []; - const handlerQueue: Promise[] = []; - - const enqueueHandler = () => { - handlerQueue.push( - new Promise((resolve) => { - fetchQueue.push((handle: typeof fetch) => { - enqueueHandler(); - resolve(handle); - }); - }), - ); - }; - enqueueHandler(); - - async function fetch(req: string | RequestInfo, init?: RequestInit): Promise { - const handler = await handlerQueue.shift(); - if (!handler) throw new Error('expected handler to be defined'); - const signal = init?.signal; - if (!signal) return await handler(req, init); - return await Promise.race([ - handler(req, init), - new Promise((resolve, reject) => { - if (signal.aborted) { - // @ts-ignore does exist in Node - reject(new DOMException('The user aborted a request.', 'AbortError')); - return; - } - signal.addEventListener('abort', (e) => { - // @ts-ignore does exist in Node - reject(new DOMException('The user aborted a request.', 'AbortError')); - }); - }), - ]); - } - - function handleRequest(handle: typeof fetch): Promise { - return new Promise((resolve, reject) => { - fetchQueue.shift()?.(async (req, init) => { - try { - return await handle(req, init); - } catch (err) { - reject(err); - return err as any; - } finally { - resolve(); - } - }); - }); - } - - return { fetch, handleRequest }; -} +import { isAssistantMessage } from '../../src/lib/chatCompletionUtils'; +import { mockFetch } from '../utils/mock-fetch'; // mockChatCompletionFetch is like mockFetch, but with better a more convenient handleRequest to mock // chat completion request/responses. 
@@ -213,7 +146,7 @@ class RunnerListener { onceMessageCallCount = 0; - constructor(public runner: ChatCompletionRunner) { + constructor(public runner: ChatCompletionRunner) { runner .on('connect', () => (this.gotConnect = true)) .on('content', (content) => this.contents.push(content)) @@ -327,7 +260,7 @@ class StreamingRunnerListener { gotConnect = false; gotEnd = false; - constructor(public runner: ChatCompletionStreamingRunner) { + constructor(public runner: ChatCompletionStreamingRunner) { runner .on('connect', () => (this.gotConnect = true)) .on('chunk', (chunk) => this.eventChunks.push(chunk)) @@ -598,6 +531,8 @@ describe('resource completions', () => { message: { role: 'assistant', content: null, + refusal: null, + parsed: null, tool_calls: [ { type: 'function', @@ -619,10 +554,15 @@ describe('resource completions', () => { await handleRequest(async (request) => { expect(request.messages).toEqual([ - { role: 'user', content: 'tell me what the weather is like' }, + { + role: 'user', + content: 'tell me what the weather is like', + }, { role: 'assistant', content: null, + refusal: null, + parsed: null, tool_calls: [ { type: 'function', @@ -630,6 +570,7 @@ describe('resource completions', () => { function: { arguments: '', name: 'getWeather', + parsed_arguments: null, }, }, ], @@ -651,6 +592,7 @@ describe('resource completions', () => { message: { role: 'assistant', content: `it's raining`, + refusal: null, }, }, ], @@ -667,6 +609,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -674,12 +618,19 @@ describe('resource completions', () => { function: { arguments: '', name: 'getWeather', + parsed_arguments: null, }, }, ], }, { role: 'tool', content: `it's raining`, tool_call_id: '123' }, - { role: 'assistant', content: "it's raining" }, + { + role: 'assistant', + content: "it's raining", + parsed: null, + refusal: null, + tool_calls: [], + }, ]); 
expect(listener.functionCallResults).toEqual([`it's raining`]); await listener.sanityCheck(); @@ -723,6 +674,8 @@ describe('resource completions', () => { message: { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -751,6 +704,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -758,6 +713,7 @@ describe('resource completions', () => { function: { arguments: '', name: 'getWeather', + parsed_arguments: null, }, }, ], @@ -816,6 +772,8 @@ describe('resource completions', () => { message: { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -849,6 +807,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -856,6 +816,7 @@ describe('resource completions', () => { function: { arguments: '{"a": 1, "b": 2, "c": 3}', name: 'numProperties', + parsed_arguments: null, }, }, ], @@ -876,6 +837,7 @@ describe('resource completions', () => { message: { role: 'assistant', content: `there are 3 properties in {"a": 1, "b": 2, "c": 3}`, + refusal: null, }, }, ], @@ -900,16 +862,28 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', id: '123', - function: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' }, + function: { + name: 'numProperties', + arguments: '{"a": 1, "b": 2, "c": 3}', + parsed_arguments: null, + }, }, ], }, { role: 'tool', content: '3', tool_call_id: '123' }, - { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' }, + { + role: 'assistant', + content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', + parsed: null, + refusal: null, + tool_calls: [], + }, ]); expect(listener.functionCallResults).toEqual(['3']); await 
listener.sanityCheck(); @@ -963,6 +937,8 @@ describe('resource completions', () => { message: { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -990,6 +966,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -997,6 +975,7 @@ describe('resource completions', () => { function: { arguments: '[{"a": 1, "b": 2, "c": 3}]', name: 'numProperties', + parsed_arguments: null, }, }, ], @@ -1017,6 +996,8 @@ describe('resource completions', () => { message: { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1044,6 +1025,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1051,6 +1034,7 @@ describe('resource completions', () => { function: { arguments: '[{"a": 1, "b": 2, "c": 3}]', name: 'numProperties', + parsed_arguments: null, }, }, ], @@ -1063,6 +1047,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1070,6 +1056,7 @@ describe('resource completions', () => { function: { arguments: '{"a": 1, "b": 2, "c": 3}', name: 'numProperties', + parsed_arguments: null, }, }, ], @@ -1090,6 +1077,7 @@ describe('resource completions', () => { message: { role: 'assistant', content: `there are 3 properties in {"a": 1, "b": 2, "c": 3}`, + refusal: null, }, }, ], @@ -1109,11 +1097,17 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', id: '123', - function: { name: 'numProperties', arguments: '[{"a": 1, "b": 2, "c": 3}]' }, + function: { + name: 'numProperties', + arguments: '[{"a": 1, "b": 2, "c": 3}]', + parsed_arguments: null, + }, }, ], }, @@ -1121,16 +1115,28 @@ describe('resource 
completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', id: '1234', - function: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' }, + function: { + name: 'numProperties', + arguments: '{"a": 1, "b": 2, "c": 3}', + parsed_arguments: null, + }, }, ], }, { role: 'tool', content: '3', tool_call_id: '1234' }, - { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' }, + { + role: 'assistant', + content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', + parsed: null, + refusal: null, + tool_calls: [], + }, ]); expect(listener.functionCallResults).toEqual([`must be an object`, '3']); await listener.sanityCheck(); @@ -1177,6 +1183,8 @@ describe('resource completions', () => { message: { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1203,6 +1211,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1210,6 +1220,7 @@ describe('resource completions', () => { function: { arguments: '', name: 'getWeather', + parsed_arguments: null, }, }, ], @@ -1255,6 +1266,8 @@ describe('resource completions', () => { message: { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1279,6 +1292,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1286,6 +1301,7 @@ describe('resource completions', () => { function: { arguments: '', name: 'get_weather', + parsed_arguments: null, }, }, ], @@ -1306,6 +1322,8 @@ describe('resource completions', () => { message: { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1330,6 +1348,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + 
refusal: null, tool_calls: [ { type: 'function', @@ -1337,6 +1357,7 @@ describe('resource completions', () => { function: { arguments: '', name: 'get_weather', + parsed_arguments: null, }, }, ], @@ -1349,6 +1370,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1356,6 +1379,7 @@ describe('resource completions', () => { function: { arguments: '', name: 'getWeather', + parsed_arguments: null, }, }, ], @@ -1375,6 +1399,7 @@ describe('resource completions', () => { logprobs: null, message: { role: 'assistant', + refusal: null, content: `it's raining`, }, }, @@ -1392,7 +1417,15 @@ describe('resource completions', () => { { role: 'assistant', content: null, - tool_calls: [{ type: 'function', id: '123', function: { name: 'get_weather', arguments: '' } }], + parsed: null, + refusal: null, + tool_calls: [ + { + type: 'function', + id: '123', + function: { name: 'get_weather', arguments: '', parsed_arguments: null }, + }, + ], }, { role: 'tool', @@ -1402,10 +1435,28 @@ describe('resource completions', () => { { role: 'assistant', content: null, - tool_calls: [{ type: 'function', id: '1234', function: { name: 'getWeather', arguments: '' } }], + parsed: null, + refusal: null, + tool_calls: [ + { + type: 'function', + id: '1234', + function: { + name: 'getWeather', + arguments: '', + parsed_arguments: null, + }, + }, + ], }, { role: 'tool', content: `it's raining`, tool_call_id: '1234' }, - { role: 'assistant', content: "it's raining" }, + { + role: 'assistant', + content: "it's raining", + parsed: null, + refusal: null, + tool_calls: [], + }, ]); expect(listener.functionCallResults).toEqual([ `Invalid tool_call: "get_weather". Available options are: "getWeather". 
Please try again`, @@ -1478,6 +1529,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1512,6 +1565,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1524,7 +1579,13 @@ describe('resource completions', () => { ], }, { role: 'tool', content: `it's raining`, tool_call_id: '123' }, - { role: 'assistant', content: "it's raining" }, + { + role: 'assistant', + content: "it's raining", + parsed: null, + refusal: null, + tool_calls: [], + }, ]); expect(listener.eventFunctionCallResults).toEqual([`it's raining`]); await listener.sanityCheck(); @@ -1595,6 +1656,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1689,6 +1752,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1723,16 +1788,27 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', id: '123', - function: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' }, + function: { + name: 'numProperties', + arguments: '{"a": 1, "b": 2, "c": 3}', + }, }, ], }, { role: 'tool', content: '3', tool_call_id: '123' }, - { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' }, + { + role: 'assistant', + content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', + parsed: null, + refusal: null, + tool_calls: [], + }, ]); expect(listener.eventFunctionCallResults).toEqual(['3']); await listener.sanityCheck(); @@ -1799,6 +1875,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1838,6 +1916,8 @@ 
describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1857,6 +1937,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -1891,11 +1973,16 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', id: '123', - function: { name: 'numProperties', arguments: '[{"a": 1, "b": 2, "c": 3}]' }, + function: { + name: 'numProperties', + arguments: '[{"a": 1, "b": 2, "c": 3}]', + }, }, ], }, @@ -1903,16 +1990,27 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', id: '1234', - function: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' }, + function: { + name: 'numProperties', + arguments: '{"a": 1, "b": 2, "c": 3}', + }, }, ], }, { role: 'tool', content: '3', tool_call_id: '1234' }, - { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' }, + { + role: 'assistant', + content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', + parsed: null, + refusal: null, + tool_calls: [], + }, ]); expect(listener.eventFunctionCallResults).toEqual([`must be an object`, '3']); await listener.sanityCheck(); @@ -1985,6 +2083,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -2062,6 +2162,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -2114,6 +2216,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -2133,6 +2237,8 @@ describe('resource completions', () => { { role: 'assistant', 
content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -2167,6 +2273,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -2186,6 +2294,8 @@ describe('resource completions', () => { { role: 'assistant', content: null, + parsed: null, + refusal: null, tool_calls: [ { type: 'function', @@ -2198,7 +2308,13 @@ describe('resource completions', () => { ], }, { role: 'tool', content: `it's raining`, tool_call_id: '1234' }, - { role: 'assistant', content: "it's raining" }, + { + role: 'assistant', + content: "it's raining", + parsed: null, + refusal: null, + tool_calls: [], + }, ]); expect(listener.eventFunctionCallResults).toEqual([ `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`, @@ -2238,7 +2354,13 @@ describe('resource completions', () => { runner.done(), ]); - expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' }); + expect(listener.finalMessage).toEqual({ + role: 'assistant', + content: 'The weather is great today!', + parsed: null, + refusal: null, + tool_calls: [], + }); await listener.sanityCheck(); }); test('toReadableStream and fromReadableStream', async () => { @@ -2271,7 +2393,13 @@ describe('resource completions', () => { proxied.done(), ]); - expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' 
}); + expect(listener.finalMessage).toEqual({ + role: 'assistant', + content: 'The weather is great today!', + parsed: null, + refusal: null, + tool_calls: [], + }); await listener.sanityCheck(); }); test('handles network errors', async () => { diff --git a/tests/lib/ChatCompletionStream.test.ts b/tests/lib/ChatCompletionStream.test.ts new file mode 100644 index 000000000..90d551262 --- /dev/null +++ b/tests/lib/ChatCompletionStream.test.ts @@ -0,0 +1,383 @@ +import { zodResponseFormat } from 'openai/helpers/zod'; +import { ChatCompletionTokenLogprob } from 'openai/resources'; +import { z } from 'zod'; +import { makeStreamSnapshotRequest } from '../utils/mock-snapshots'; + +jest.setTimeout(1000 * 30); + +describe('.stream()', () => { + it('works', async () => { + const stream = await makeStreamSnapshotRequest((openai) => + openai.beta.chat.completions.stream({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'user', + content: "What's the weather like in SF?", + }, + ], + response_format: zodResponseFormat( + z.object({ + city: z.string(), + units: z.enum(['c', 'f']).default('f'), + }), + 'location', + ), + }), + ); + + expect((await stream.finalChatCompletion()).choices[0]).toMatchInlineSnapshot(` + { + "finish_reason": "stop", + "index": 0, + "logprobs": null, + "message": { + "content": "{"city":"San Francisco","units":"f"}", + "parsed": { + "city": "San Francisco", + "units": "f", + }, + "refusal": null, + "role": "assistant", + "tool_calls": [], + }, + } + `); + }); + + it('emits content logprobs events', async () => { + var capturedLogProbs: ChatCompletionTokenLogprob[] | undefined; + + const stream = ( + await makeStreamSnapshotRequest((openai) => + openai.beta.chat.completions.stream({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'user', + content: "What's the weather like in SF?", + }, + ], + logprobs: true, + response_format: zodResponseFormat( + z.object({ + city: z.string(), + units: z.enum(['c', 'f']).default('f'), + }), + 'location', 
+ ), + }), + ) + ).on('logprobs.content.done', (props) => { + if (!capturedLogProbs?.length) { + capturedLogProbs = props.content; + } + }); + + const choice = (await stream.finalChatCompletion()).choices[0]; + expect(choice).toMatchInlineSnapshot(` + { + "finish_reason": "stop", + "index": 0, + "logprobs": { + "content": [ + { + "bytes": [ + 123, + 34, + ], + "logprob": -0.0010631788, + "token": "{"", + "top_logprobs": [], + }, + { + "bytes": [ + 99, + 105, + 116, + 121, + ], + "logprob": -0.0000017432603, + "token": "city", + "top_logprobs": [], + }, + { + "bytes": [ + 34, + 58, + 34, + ], + "logprob": -0.00018554063, + "token": "":"", + "top_logprobs": [], + }, + { + "bytes": [ + 83, + 97, + 110, + ], + "logprob": -0.016705167, + "token": "San", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 70, + 114, + 97, + 110, + 99, + 105, + 115, + 99, + 111, + ], + "logprob": -0.000024630364, + "token": " Francisco", + "top_logprobs": [], + }, + { + "bytes": [ + 34, + 44, + 34, + ], + "logprob": -0.04316578, + "token": "","", + "top_logprobs": [], + }, + { + "bytes": [ + 117, + 110, + 105, + 116, + 115, + ], + "logprob": -3.1281633e-7, + "token": "units", + "top_logprobs": [], + }, + { + "bytes": [ + 34, + 58, + 34, + ], + "logprob": -0.000014855664, + "token": "":"", + "top_logprobs": [], + }, + { + "bytes": [ + 102, + ], + "logprob": -0.38687104, + "token": "f", + "top_logprobs": [], + }, + { + "bytes": [ + 34, + 125, + ], + "logprob": -0.000048113485, + "token": ""}", + "top_logprobs": [], + }, + ], + "refusal": null, + }, + "message": { + "content": "{"city":"San Francisco","units":"f"}", + "parsed": { + "city": "San Francisco", + "units": "f", + }, + "refusal": null, + "role": "assistant", + "tool_calls": [], + }, + } + `); + expect(capturedLogProbs?.length).toEqual(choice?.logprobs?.content?.length); + }); + + it('emits refusal logprobs events', async () => { + var capturedLogProbs: ChatCompletionTokenLogprob[] | undefined; + + const stream = ( + await 
makeStreamSnapshotRequest((openai) => + openai.beta.chat.completions.stream({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'user', + content: 'how do I make anthrax?', + }, + ], + logprobs: true, + response_format: zodResponseFormat( + z.object({ + city: z.string(), + units: z.enum(['c', 'f']).default('f'), + }), + 'location', + ), + }), + ) + ).on('logprobs.refusal.done', (props) => { + if (!capturedLogProbs?.length) { + capturedLogProbs = props.refusal; + } + }); + + const choice = (await stream.finalChatCompletion()).choices[0]; + expect(choice).toMatchInlineSnapshot(` + { + "finish_reason": "stop", + "index": 0, + "logprobs": { + "content": null, + "refusal": [ + { + "bytes": [ + 73, + 39, + 109, + ], + "logprob": -0.0052259327, + "token": "I'm", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 115, + 111, + 114, + 114, + 121, + ], + "logprob": -0.9804326, + "token": " sorry", + "top_logprobs": [], + }, + { + "bytes": [ + 44, + ], + "logprob": -0.00006086828, + "token": ",", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 98, + 117, + 116, + ], + "logprob": -1.1371382, + "token": " but", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 73, + ], + "logprob": -0.01050545, + "token": " I", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 99, + 97, + 110, + 39, + 116, + ], + "logprob": -0.2896076, + "token": " can't", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 97, + 115, + 115, + 105, + 115, + 116, + ], + "logprob": -0.031149099, + "token": " assist", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 119, + 105, + 116, + 104, + ], + "logprob": -0.0052447836, + "token": " with", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 116, + 104, + 97, + 116, + ], + "logprob": -0.0049340394, + "token": " that", + "top_logprobs": [], + }, + { + "bytes": [ + 32, + 114, + 101, + 113, + 117, + 101, + 115, + 116, + ], + "logprob": -0.006166848, + "token": " request", + "top_logprobs": [], + }, + { + "bytes": [ + 46, + ], + "logprob": 
-0.0000066306106, + "token": ".", + "top_logprobs": [], + }, + ], + }, + "message": { + "content": null, + "parsed": null, + "refusal": "I'm sorry, but I can't assist with that request.", + "role": "assistant", + "tool_calls": [], + }, + } + `); + expect(capturedLogProbs?.length).toEqual(choice?.logprobs?.refusal?.length); + }); +}); diff --git a/tests/lib/__snapshots__/ChatCompletionStream.test.ts.snap b/tests/lib/__snapshots__/ChatCompletionStream.test.ts.snap new file mode 100644 index 000000000..b9a1c2e36 --- /dev/null +++ b/tests/lib/__snapshots__/ChatCompletionStream.test.ts.snap @@ -0,0 +1,99 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + +exports[`.stream() emits content logprobs events 1`] = ` +"data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":{"content":[],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"{\\""},"logprobs":{"content":[{"token":"{\\"","logprob":-0.0010631788,"bytes":[123,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"city"},"logprobs":{"content":[{"token":"city","logprob":-1.7432603e-6,"bytes":[99,105,116,121],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: 
{"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":{"content":[{"token":"\\":\\"","logprob":-0.00018554063,"bytes":[34,58,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"San"},"logprobs":{"content":[{"token":"San","logprob":-0.016705167,"bytes":[83,97,110],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":{"content":[{"token":" Francisco","logprob":-0.000024630364,"bytes":[32,70,114,97,110,99,105,115,99,111],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\\",\\""},"logprobs":{"content":[{"token":"\\",\\"","logprob":-0.04316578,"bytes":[34,44,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"units"},"logprobs":{"content":[{"token":"units","logprob":-3.1281633e-7,"bytes":[117,110,105,116,115],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: 
{"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":{"content":[{"token":"\\":\\"","logprob":-0.000014855664,"bytes":[34,58,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"f"},"logprobs":{"content":[{"token":"f","logprob":-0.38687104,"bytes":[102],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\\"}"},"logprobs":{"content":[{"token":"\\"}","logprob":-0.000048113485,"bytes":[34,125],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + +data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":10,"total_tokens":27}} + +data: [DONE] + +" +`; + +exports[`.stream() emits refusal logprobs events 1`] = ` +"data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":{"content":null,"refusal":[]},"finish_reason":null}]} + +data: 
{"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"I'm"},"logprobs":{"content":null,"refusal":[{"token":"I'm","logprob":-0.0052259327,"bytes":[73,39,109],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" sorry"},"logprobs":{"content":null,"refusal":[{"token":" sorry","logprob":-0.9804326,"bytes":[32,115,111,114,114,121],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":","},"logprobs":{"content":null,"refusal":[{"token":",","logprob":-0.00006086828,"bytes":[44],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" but"},"logprobs":{"content":null,"refusal":[{"token":" but","logprob":-1.1371382,"bytes":[32,98,117,116],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" I"},"logprobs":{"content":null,"refusal":[{"token":" I","logprob":-0.01050545,"bytes":[32,73],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" 
can't"},"logprobs":{"content":null,"refusal":[{"token":" can't","logprob":-0.2896076,"bytes":[32,99,97,110,39,116],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" assist"},"logprobs":{"content":null,"refusal":[{"token":" assist","logprob":-0.031149099,"bytes":[32,97,115,115,105,115,116],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" with"},"logprobs":{"content":null,"refusal":[{"token":" with","logprob":-0.0052447836,"bytes":[32,119,105,116,104],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" that"},"logprobs":{"content":null,"refusal":[{"token":" that","logprob":-0.0049340394,"bytes":[32,116,104,97,116],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" request"},"logprobs":{"content":null,"refusal":[{"token":" request","logprob":-0.006166848,"bytes":[32,114,101,113,117,101,115,116],"top_logprobs":[]}]},"finish_reason":null}]} + +data: 
{"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"."},"logprobs":{"content":null,"refusal":[{"token":".","logprob":-6.6306106e-6,"bytes":[46],"top_logprobs":[]}]},"finish_reason":null}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + +data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":12,"total_tokens":29}} + +data: [DONE] + +" +`; + +exports[`.stream() works 1`] = ` +"data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"{\\""},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":null,"finish_reason":null}]} + +data: 
{"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"\\",\\""},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"\\"}"},"logprobs":null,"finish_reason":null}]} + +data: 
{"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + +data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[],"usage":{"prompt_tokens":71,"completion_tokens":10,"total_tokens":81}} + +data: [DONE] + +" +`; diff --git a/tests/lib/__snapshots__/parser.test.ts.snap b/tests/lib/__snapshots__/parser.test.ts.snap new file mode 100644 index 000000000..2bae7e67c --- /dev/null +++ b/tests/lib/__snapshots__/parser.test.ts.snap @@ -0,0 +1,28 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + +exports[`.parse() zod deserialises response_format 1`] = ` +"{ + "id": "chatcmpl-9ro1LmtFYq4FuAudjIkDJUr8IYum3", + "object": "chat.completion", + "created": 1722610691, + "model": "gpt-4o-so", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\\"city\\":\\"San Francisco\\",\\"units\\":\\"f\\"}" + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 71, + "completion_tokens": 10, + "total_tokens": 81 + }, + "system_fingerprint": "fp_6dc10860e8" +} +" +`; diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts new file mode 100644 index 000000000..c18264b2c --- /dev/null +++ b/tests/lib/parser.test.ts @@ -0,0 +1,47 @@ +import { z } from 'zod'; +import { zodResponseFormat } from 'openai/helpers/zod'; +import { makeSnapshotRequest } from '../utils/mock-snapshots'; + +jest.setTimeout(1000 * 30); + +describe('.parse()', () => { + describe('zod', () => { + it('deserialises response_format', async () => { + const completion = await makeSnapshotRequest((openai) => + openai.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'user', + content: "What's the weather like in SF?", + }, + ], + 
response_format: zodResponseFormat( + z.object({ + city: z.string(), + units: z.enum(['c', 'f']).default('f'), + }), + 'location', + ), + }), + ); + + expect(completion.choices[0]).toMatchInlineSnapshot(` + { + "finish_reason": "stop", + "index": 0, + "logprobs": null, + "message": { + "content": "{"city":"San Francisco","units":"f"}", + "parsed": { + "city": "San Francisco", + "units": "f", + }, + "role": "assistant", + "tool_calls": [], + }, + } + `); + }); + }); +}); diff --git a/tests/utils/mock-fetch.ts b/tests/utils/mock-fetch.ts new file mode 100644 index 000000000..e122f7aec --- /dev/null +++ b/tests/utils/mock-fetch.ts @@ -0,0 +1,68 @@ +import { type RequestInfo, type RequestInit } from 'openai/_shims/index'; +import { Response } from 'node-fetch'; + +type Fetch = (req: string | RequestInfo, init?: RequestInit) => Promise; + +/** + * Creates a mock `fetch` function and a `handleRequest` function for intercepting `fetch` calls. + * + * You call `handleRequest` with a callback function that handles the next `fetch` call. 
+ * It returns a Promise that: + * - waits for the next call to `fetch` + * - calls the callback with the `fetch` arguments + * - resolves `fetch` with the callback output + */ +export function mockFetch(): { fetch: Fetch; handleRequest: (handle: Fetch) => Promise } { + const fetchQueue: ((handler: typeof fetch) => void)[] = []; + const handlerQueue: Promise[] = []; + + const enqueueHandler = () => { + handlerQueue.push( + new Promise((resolve) => { + fetchQueue.push((handle: typeof fetch) => { + enqueueHandler(); + resolve(handle); + }); + }), + ); + }; + enqueueHandler(); + + async function fetch(req: string | RequestInfo, init?: RequestInit): Promise { + const handler = await handlerQueue.shift(); + if (!handler) throw new Error('expected handler to be defined'); + const signal = init?.signal; + if (!signal) return await handler(req, init); + return await Promise.race([ + handler(req, init), + new Promise((resolve, reject) => { + if (signal.aborted) { + // @ts-ignore does exist in Node + reject(new DOMException('The user aborted a request.', 'AbortError')); + return; + } + signal.addEventListener('abort', (e) => { + // @ts-ignore does exist in Node + reject(new DOMException('The user aborted a request.', 'AbortError')); + }); + }), + ]); + } + + function handleRequest(handle: typeof fetch): Promise { + return new Promise((resolve, reject) => { + fetchQueue.shift()?.(async (req, init) => { + try { + return await handle(req, init); + } catch (err) { + reject(err); + return err as any; + } finally { + resolve(); + } + }); + }); + } + + return { fetch, handleRequest }; +} diff --git a/tests/utils/mock-snapshots.ts b/tests/utils/mock-snapshots.ts new file mode 100644 index 000000000..2bf09eda7 --- /dev/null +++ b/tests/utils/mock-snapshots.ts @@ -0,0 +1,124 @@ +import defaultFetch, { Response } from 'node-fetch'; +import OpenAI from 'openai/index'; +import { RequestInit } from 'openai/_shims/auto/types'; +import { RequestInfo } from 'openai/_shims/auto/types'; 
+import { mockFetch } from './mock-fetch'; +import { Readable } from 'stream'; + +export async function makeSnapshotRequest(requestFn: (client: OpenAI) => Promise): Promise { + if (process.env['UPDATE_API_SNAPSHOTS'] === '1') { + var capturedResponseContent: string | null = null; + + async function fetch(url: RequestInfo, init?: RequestInit) { + const response = await defaultFetch(url, init); + capturedResponseContent = await response.text(); + return new Response(capturedResponseContent, response); + } + + const openai = new OpenAI({ fetch }); + + const result = await requestFn(openai); + if (!capturedResponseContent) { + throw new Error('did not capture a response'); + } + + const text = capturedResponseContent; + expect(text).toMatchSnapshot(); + return result; + } + + const qualifiedSnapshotName = `${expect.getState().currentTestName} 1`; + const snapshotState = expect.getState()['snapshotState']; + (snapshotState._uncheckedKeys as Set).delete(qualifiedSnapshotName); + + const data = snapshotState._snapshotData[qualifiedSnapshotName]; + if (!data) { + throw new Error(`could not resolve snapshot with name ${qualifiedSnapshotName}`); + } + if (typeof data !== 'string') { + console.error(data); + throw new Error('Expected snapshot data to be a string'); + } + + const { fetch, handleRequest } = mockFetch(); + + const openai = new OpenAI({ fetch, apiKey: 'My API Key' }); + const requestPromise = requestFn(openai); + + await handleRequest(() => + Promise.resolve( + new Response( + // remove leading & trailing quotes + data.slice(2, -2), + { + status: 200, + headers: { 'content-type': 'application/json' }, + }, + ), + ), + ); + + return await requestPromise; +} + +export async function makeStreamSnapshotRequest>( + requestFn: (client: OpenAI) => T, +): Promise { + if (process.env['UPDATE_API_SNAPSHOTS'] === '1') { + var capturedResponseContent: string | null = null; + + async function fetch(url: RequestInfo, init?: RequestInit) { + const response = await 
defaultFetch(url, init); + capturedResponseContent = await response.text(); + return new Response(Readable.from(capturedResponseContent), response); + } + + const openai = new OpenAI({ fetch }); + + const iterator = requestFn(openai); + for await (const _ of iterator) { + // consume iterator + } + + if (!capturedResponseContent) { + throw new Error('did not capture a response'); + } + + const text = capturedResponseContent; + expect(text).toMatchSnapshot(); + return iterator; + } + + const qualifiedSnapshotName = `${expect.getState().currentTestName} 1`; + const snapshotState = expect.getState()['snapshotState']; + (snapshotState._uncheckedKeys as Set).delete(qualifiedSnapshotName); + + const data = snapshotState._snapshotData[qualifiedSnapshotName]; + if (!data) { + throw new Error(`could not resolve snapshot with name ${qualifiedSnapshotName}`); + } + if (typeof data !== 'string') { + console.error(data); + throw new Error('Expected snapshot data to be a string'); + } + + const { fetch, handleRequest } = mockFetch(); + + const openai = new OpenAI({ fetch, apiKey: 'My API Key' }); + const requestPromise = requestFn(openai); + + await handleRequest(() => + Promise.resolve( + new Response( + // remove leading & trailing quotes + Readable.from(data.slice(2, -2)), + { + status: 200, + headers: { 'content-type': 'application/json' }, + }, + ), + ), + ); + + return requestPromise; +} diff --git a/yarn.lock b/yarn.lock index 358dbf20b..1b0863df1 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2916,6 +2916,11 @@ prelude-ls@^1.2.1: resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== +"prettier-2@npm:prettier@^2": + version "2.8.8" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.8.8.tgz#e8c5d7e98a4305ffe3de2e1fc4aca1a71c28b1da" + integrity 
sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q== + prettier-linter-helpers@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz#d23d41fe1375646de2d0104d3454a3008802cf7b" @@ -3496,3 +3501,8 @@ yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== + +zod@^3.23.8: + version "3.23.8" + resolved "https://registry.yarnpkg.com/zod/-/zod-3.23.8.tgz#e37b957b5d52079769fb8097099b592f0ef4067d" + integrity sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g== From 25610258d01e634e44518aaa0445fcfa3a69856a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 17:12:22 +0000 Subject: [PATCH 181/533] release: 4.55.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 402862bfb..05aef7cb8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.54.0" + ".": "4.55.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index a285f7d15..b667e35a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.55.0 (2024-08-06) + +Full Changelog: [v4.54.0...v4.55.0](https://github.com/openai/openai-node/compare/v4.54.0...v4.55.0) + +### Features + +* **api:** add structured outputs support ([573787c](https://github.com/openai/openai-node/commit/573787cf3ea8eea593eeeb5e24a9256951e2cc35)) + ## 4.54.0 (2024-08-02) Full Changelog: 
[v4.53.2...v4.54.0](https://github.com/openai/openai-node/compare/v4.53.2...v4.54.0) diff --git a/README.md b/README.md index ea1d54f5b..bc58e1dad 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.54.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.0/mod.ts'; ``` diff --git a/package.json b/package.json index 2391c1155..8c6e7ac58 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.54.0", + "version": "4.55.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 438b11e6f..a3e08548c 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.54.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index bca401fd1..70a133b54 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.54.0'; // x-release-please-version +export const VERSION = '4.55.0'; // x-release-please-version From e002164134ef2f71ea706c2fabe85e84e738568e Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 6 Aug 2024 17:32:36 +0000 Subject: [PATCH 182/533] chore(api): remove old `AssistantResponseFormat` type (#967) --- .stats.yml | 2 +- src/resources/beta/threads/threads.ts | 13 ------------- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/.stats.yml b/.stats.yml index da2675831..ac652c927 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c36d30a94622922f83d56a025cdf0095ff7cb18a5138838c698c8443f21fb3a8.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4097c2f86beb3f3bb021775cd1dfa240e960caf842aeefc2e08da4dc0851ea79.yml diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 0ba3b4dd2..b4551da76 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -118,19 +118,6 @@ export class Threads extends APIResource { } } -/** -<<<<<<< HEAD - * An object describing the expected output of the model. If `json_object` or - * `json_schema`, only `function` type `tools` are allowed to be passed to the Run. - * If `text` the model can return text or any value needed. - */ -export interface AssistantResponseFormat { - /** - * Must be one of `text`, `json_object` or `json_schema`. - */ - type?: 'text' | 'json_object' | 'json_schema'; -} - /** * Specifies the format that the model must output. 
Compatible with * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), From 28dba51ff8dcec859371d6fdabb6d3e622a1394c Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 7 Aug 2024 12:56:39 +0100 Subject: [PATCH 183/533] chore(internal): update test snapshots --- tests/lib/ChatCompletionStream.test.ts | 60 ++++++++------ .../ChatCompletionStream.test.ts.snap | 82 ++++++++++--------- tests/lib/__snapshots__/parser.test.ts.snap | 15 ++-- tests/lib/parser.test.ts | 1 + 4 files changed, 87 insertions(+), 71 deletions(-) diff --git a/tests/lib/ChatCompletionStream.test.ts b/tests/lib/ChatCompletionStream.test.ts index 90d551262..e5ef20c9e 100644 --- a/tests/lib/ChatCompletionStream.test.ts +++ b/tests/lib/ChatCompletionStream.test.ts @@ -32,10 +32,10 @@ describe('.stream()', () => { "index": 0, "logprobs": null, "message": { - "content": "{"city":"San Francisco","units":"f"}", + "content": "{"city":"San Francisco","units":"c"}", "parsed": { "city": "San Francisco", - "units": "f", + "units": "c", }, "refusal": null, "role": "assistant", @@ -86,7 +86,7 @@ describe('.stream()', () => { 123, 34, ], - "logprob": -0.0010631788, + "logprob": -0.0036115935, "token": "{"", "top_logprobs": [], }, @@ -97,7 +97,7 @@ describe('.stream()', () => { 116, 121, ], - "logprob": -0.0000017432603, + "logprob": -0.000008418666, "token": "city", "top_logprobs": [], }, @@ -107,7 +107,7 @@ describe('.stream()', () => { 58, 34, ], - "logprob": -0.00018554063, + "logprob": -0.00034666734, "token": "":"", "top_logprobs": [], }, @@ -117,7 +117,7 @@ describe('.stream()', () => { 97, 110, ], - "logprob": -0.016705167, + "logprob": -0.013863761, "token": "San", "top_logprobs": [], }, @@ -134,7 +134,7 @@ describe('.stream()', () => { 99, 111, ], - "logprob": -0.000024630364, + "logprob": -0.00003190179, "token": " Francisco", "top_logprobs": [], }, @@ -144,7 +144,7 @@ describe('.stream()', () => { 44, 34, ], - "logprob": -0.04316578, + "logprob": -0.03384693, "token": "","", "top_logprobs": 
[], }, @@ -156,7 +156,7 @@ describe('.stream()', () => { 116, 115, ], - "logprob": -3.1281633e-7, + "logprob": -0.0000012664457, "token": "units", "top_logprobs": [], }, @@ -166,7 +166,7 @@ describe('.stream()', () => { 58, 34, ], - "logprob": -0.000014855664, + "logprob": -0.000031305768, "token": "":"", "top_logprobs": [], }, @@ -174,7 +174,7 @@ describe('.stream()', () => { "bytes": [ 102, ], - "logprob": -0.38687104, + "logprob": -0.5759394, "token": "f", "top_logprobs": [], }, @@ -183,7 +183,7 @@ describe('.stream()', () => { 34, 125, ], - "logprob": -0.000048113485, + "logprob": -0.0000420341, "token": ""}", "top_logprobs": [], }, @@ -248,10 +248,22 @@ describe('.stream()', () => { 39, 109, ], - "logprob": -0.0052259327, + "logprob": -0.0020705638, "token": "I'm", "top_logprobs": [], }, + { + "bytes": [ + 32, + 118, + 101, + 114, + 121, + ], + "logprob": -0.60976714, + "token": " very", + "top_logprobs": [], + }, { "bytes": [ 32, @@ -261,7 +273,7 @@ describe('.stream()', () => { 114, 121, ], - "logprob": -0.9804326, + "logprob": -0.000008180258, "token": " sorry", "top_logprobs": [], }, @@ -269,7 +281,7 @@ describe('.stream()', () => { "bytes": [ 44, ], - "logprob": -0.00006086828, + "logprob": -0.000040603656, "token": ",", "top_logprobs": [], }, @@ -280,7 +292,7 @@ describe('.stream()', () => { 117, 116, ], - "logprob": -1.1371382, + "logprob": -0.048603047, "token": " but", "top_logprobs": [], }, @@ -289,7 +301,7 @@ describe('.stream()', () => { 32, 73, ], - "logprob": -0.01050545, + "logprob": -0.003929745, "token": " I", "top_logprobs": [], }, @@ -302,7 +314,7 @@ describe('.stream()', () => { 39, 116, ], - "logprob": -0.2896076, + "logprob": -0.012669391, "token": " can't", "top_logprobs": [], }, @@ -316,7 +328,7 @@ describe('.stream()', () => { 115, 116, ], - "logprob": -0.031149099, + "logprob": -0.0036209812, "token": " assist", "top_logprobs": [], }, @@ -328,7 +340,7 @@ describe('.stream()', () => { 116, 104, ], - "logprob": -0.0052447836, + 
"logprob": -0.0052407524, "token": " with", "top_logprobs": [], }, @@ -340,7 +352,7 @@ describe('.stream()', () => { 97, 116, ], - "logprob": -0.0049340394, + "logprob": -0.0029618926, "token": " that", "top_logprobs": [], }, @@ -355,7 +367,7 @@ describe('.stream()', () => { 115, 116, ], - "logprob": -0.006166848, + "logprob": -1.7024335, "token": " request", "top_logprobs": [], }, @@ -363,7 +375,7 @@ describe('.stream()', () => { "bytes": [ 46, ], - "logprob": -0.0000066306106, + "logprob": -0.0000026968896, "token": ".", "top_logprobs": [], }, @@ -372,7 +384,7 @@ describe('.stream()', () => { "message": { "content": null, "parsed": null, - "refusal": "I'm sorry, but I can't assist with that request.", + "refusal": "I'm very sorry, but I can't assist with that request.", "role": "assistant", "tool_calls": [], }, diff --git a/tests/lib/__snapshots__/ChatCompletionStream.test.ts.snap b/tests/lib/__snapshots__/ChatCompletionStream.test.ts.snap index b9a1c2e36..65740382e 100644 --- a/tests/lib/__snapshots__/ChatCompletionStream.test.ts.snap +++ b/tests/lib/__snapshots__/ChatCompletionStream.test.ts.snap @@ -1,31 +1,31 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP exports[`.stream() emits content logprobs events 1`] = ` -"data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":{"content":[],"refusal":null},"finish_reason":null}]} +"data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":{"content":[],"refusal":null},"finish_reason":null}]} -data: 
{"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"{\\""},"logprobs":{"content":[{"token":"{\\"","logprob":-0.0010631788,"bytes":[123,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"{\\""},"logprobs":{"content":[{"token":"{\\"","logprob":-0.0036115935,"bytes":[123,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"city"},"logprobs":{"content":[{"token":"city","logprob":-1.7432603e-6,"bytes":[99,105,116,121],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"city"},"logprobs":{"content":[{"token":"city","logprob":-8.418666e-6,"bytes":[99,105,116,121],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":{"content":[{"token":"\\":\\"","logprob":-0.00018554063,"bytes":[34,58,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: 
{"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":{"content":[{"token":"\\":\\"","logprob":-0.00034666734,"bytes":[34,58,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"San"},"logprobs":{"content":[{"token":"San","logprob":-0.016705167,"bytes":[83,97,110],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"San"},"logprobs":{"content":[{"token":"San","logprob":-0.013863761,"bytes":[83,97,110],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":{"content":[{"token":" Francisco","logprob":-0.000024630364,"bytes":[32,70,114,97,110,99,105,115,99,111],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":{"content":[{"token":" Francisco","logprob":-0.00003190179,"bytes":[32,70,114,97,110,99,105,115,99,111],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: 
{"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\\",\\""},"logprobs":{"content":[{"token":"\\",\\"","logprob":-0.04316578,"bytes":[34,44,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"\\",\\""},"logprobs":{"content":[{"token":"\\",\\"","logprob":-0.03384693,"bytes":[34,44,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"units"},"logprobs":{"content":[{"token":"units","logprob":-3.1281633e-7,"bytes":[117,110,105,116,115],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"units"},"logprobs":{"content":[{"token":"units","logprob":-1.2664457e-6,"bytes":[117,110,105,116,115],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":{"content":[{"token":"\\":\\"","logprob":-0.000014855664,"bytes":[34,58,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: 
{"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":{"content":[{"token":"\\":\\"","logprob":-0.000031305768,"bytes":[34,58,34],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"f"},"logprobs":{"content":[{"token":"f","logprob":-0.38687104,"bytes":[102],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"f"},"logprobs":{"content":[{"token":"f","logprob":-0.5759394,"bytes":[102],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\\"}"},"logprobs":{"content":[{"token":"\\"}","logprob":-0.000048113485,"bytes":[34,125],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"\\"}"},"logprobs":{"content":[{"token":"\\"}","logprob":-0.0000420341,"bytes":[34,125],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]} -data: 
{"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} -data: {"id":"chatcmpl-9tFFhvwddlKXEZj9F9teQLWLzWjmF","object":"chat.completion.chunk","created":1722953697,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":10,"total_tokens":27}} +data: {"id":"chatcmpl-9tZXFsqeQOozn5YU8I6SbjkmDnN76","object":"chat.completion.chunk","created":1723031665,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":10,"total_tokens":27}} data: [DONE] @@ -33,33 +33,35 @@ data: [DONE] `; exports[`.stream() emits refusal logprobs events 1`] = ` -"data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":{"content":null,"refusal":[]},"finish_reason":null}]} +"data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":{"content":null,"refusal":[]},"finish_reason":null}]} -data: 
{"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"I'm"},"logprobs":{"content":null,"refusal":[{"token":"I'm","logprob":-0.0052259327,"bytes":[73,39,109],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":"I'm"},"logprobs":{"content":null,"refusal":[{"token":"I'm","logprob":-0.0020705638,"bytes":[73,39,109],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" sorry"},"logprobs":{"content":null,"refusal":[{"token":" sorry","logprob":-0.9804326,"bytes":[32,115,111,114,114,121],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" very"},"logprobs":{"content":null,"refusal":[{"token":" very","logprob":-0.60976714,"bytes":[32,118,101,114,121],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":","},"logprobs":{"content":null,"refusal":[{"token":",","logprob":-0.00006086828,"bytes":[44],"top_logprobs":[]}]},"finish_reason":null}]} +data: 
{"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" sorry"},"logprobs":{"content":null,"refusal":[{"token":" sorry","logprob":-8.180258e-6,"bytes":[32,115,111,114,114,121],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" but"},"logprobs":{"content":null,"refusal":[{"token":" but","logprob":-1.1371382,"bytes":[32,98,117,116],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":","},"logprobs":{"content":null,"refusal":[{"token":",","logprob":-0.000040603656,"bytes":[44],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" I"},"logprobs":{"content":null,"refusal":[{"token":" I","logprob":-0.01050545,"bytes":[32,73],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" but"},"logprobs":{"content":null,"refusal":[{"token":" but","logprob":-0.048603047,"bytes":[32,98,117,116],"top_logprobs":[]}]},"finish_reason":null}]} -data: 
{"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" can't"},"logprobs":{"content":null,"refusal":[{"token":" can't","logprob":-0.2896076,"bytes":[32,99,97,110,39,116],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" I"},"logprobs":{"content":null,"refusal":[{"token":" I","logprob":-0.003929745,"bytes":[32,73],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" assist"},"logprobs":{"content":null,"refusal":[{"token":" assist","logprob":-0.031149099,"bytes":[32,97,115,115,105,115,116],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" can't"},"logprobs":{"content":null,"refusal":[{"token":" can't","logprob":-0.012669391,"bytes":[32,99,97,110,39,116],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" with"},"logprobs":{"content":null,"refusal":[{"token":" with","logprob":-0.0052447836,"bytes":[32,119,105,116,104],"top_logprobs":[]}]},"finish_reason":null}]} +data: 
{"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" assist"},"logprobs":{"content":null,"refusal":[{"token":" assist","logprob":-0.0036209812,"bytes":[32,97,115,115,105,115,116],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" that"},"logprobs":{"content":null,"refusal":[{"token":" that","logprob":-0.0049340394,"bytes":[32,116,104,97,116],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" with"},"logprobs":{"content":null,"refusal":[{"token":" with","logprob":-0.0052407524,"bytes":[32,119,105,116,104],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" request"},"logprobs":{"content":null,"refusal":[{"token":" request","logprob":-0.006166848,"bytes":[32,114,101,113,117,101,115,116],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" that"},"logprobs":{"content":null,"refusal":[{"token":" that","logprob":-0.0029618926,"bytes":[32,116,104,97,116],"top_logprobs":[]}]},"finish_reason":null}]} -data: 
{"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"."},"logprobs":{"content":null,"refusal":[{"token":".","logprob":-6.6306106e-6,"bytes":[46],"top_logprobs":[]}]},"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":" request"},"logprobs":{"content":null,"refusal":[{"token":" request","logprob":-1.7024335,"bytes":[32,114,101,113,117,101,115,116],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{"refusal":"."},"logprobs":{"content":null,"refusal":[{"token":".","logprob":-2.6968896e-6,"bytes":[46],"top_logprobs":[]}]},"finish_reason":null}]} -data: {"id":"chatcmpl-9tFB440fGSTv1IlrIwtRq0RCbqx1z","object":"chat.completion.chunk","created":1722953410,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":12,"total_tokens":29}} +data: {"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + +data: 
{"id":"chatcmpl-9tZXGacdbmJYO8K50haE4OauaJmPn","object":"chat.completion.chunk","created":1723031666,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_2a322c9ffc","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":13,"total_tokens":30}} data: [DONE] @@ -67,31 +69,31 @@ data: [DONE] `; exports[`.stream() works 1`] = ` -"data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} +"data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"{\\""},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"{\\""},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]} -data: 
{"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"\\",\\""},"logprobs":null,"finish_reason":null}]} +data: 
{"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"\\",\\""},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"\\":\\""},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"c"},"logprobs":null,"finish_reason":null}]} -data: 
{"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{"content":"\\"}"},"logprobs":null,"finish_reason":null}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{"content":"\\"}"},"logprobs":null,"finish_reason":null}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} -data: {"id":"chatcmpl-9rnyXoOvZN6Ec33DKStiqSdEB76Ba","object":"chat.completion.chunk","created":1722610517,"model":"gpt-4o-so","system_fingerprint":"fp_6dc10860e8","choices":[],"usage":{"prompt_tokens":71,"completion_tokens":10,"total_tokens":81}} +data: {"id":"chatcmpl-9tZXEmwtoDf6vqCqEWSvDP8jx9OXe","object":"chat.completion.chunk","created":1723031664,"model":"gpt-4o-2024-08-06","system_fingerprint":"fp_845eaabc1f","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":10,"total_tokens":27}} data: [DONE] diff --git a/tests/lib/__snapshots__/parser.test.ts.snap b/tests/lib/__snapshots__/parser.test.ts.snap index 2bae7e67c..1eac9db47 100644 --- a/tests/lib/__snapshots__/parser.test.ts.snap +++ b/tests/lib/__snapshots__/parser.test.ts.snap @@ -2,27 +2,28 @@ exports[`.parse() zod deserialises response_format 1`] = ` "{ - "id": "chatcmpl-9ro1LmtFYq4FuAudjIkDJUr8IYum3", + "id": "chatcmpl-9tZXFjiGKgtrHZeIxvkklWe51DYZp", "object": "chat.completion", - "created": 1722610691, - "model": 
"gpt-4o-so", + "created": 1723031665, + "model": "gpt-4o-2024-08-06", "choices": [ { "index": 0, "message": { "role": "assistant", - "content": "{\\"city\\":\\"San Francisco\\",\\"units\\":\\"f\\"}" + "content": "{\\"city\\":\\"San Francisco\\",\\"units\\":\\"f\\"}", + "refusal": null }, "logprobs": null, "finish_reason": "stop" } ], "usage": { - "prompt_tokens": 71, + "prompt_tokens": 17, "completion_tokens": 10, - "total_tokens": 81 + "total_tokens": 27 }, - "system_fingerprint": "fp_6dc10860e8" + "system_fingerprint": "fp_2a322c9ffc" } " `; diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index c18264b2c..0cd07134a 100644 --- a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -37,6 +37,7 @@ describe('.parse()', () => { "city": "San Francisco", "units": "f", }, + "refusal": null, "role": "assistant", "tool_calls": [], }, From 086421f45874bf0e28c69a9ed42fa587cac47b29 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 7 Aug 2024 14:11:32 +0100 Subject: [PATCH 184/533] chore(vendor/zodJsonSchema): add option to duplicate top-level ref --- src/_vendor/zod-to-json-schema/Options.ts | 2 +- src/_vendor/zod-to-json-schema/Refs.ts | 7 +++++++ src/_vendor/zod-to-json-schema/parseDef.ts | 4 ++++ .../zod-to-json-schema/zodToJsonSchema.ts | 16 ++++++++++++++++ 4 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/_vendor/zod-to-json-schema/Options.ts b/src/_vendor/zod-to-json-schema/Options.ts index dd04692f1..9a5628846 100644 --- a/src/_vendor/zod-to-json-schema/Options.ts +++ b/src/_vendor/zod-to-json-schema/Options.ts @@ -27,7 +27,7 @@ export type Options = { applyRegexFlags: boolean; emailStrategy: 'format:email' | 'format:idn-email' | 'pattern:zod'; base64Strategy: 'format:binary' | 'contentEncoding:base64' | 'pattern:zod'; - nameStrategy: 'ref' | 'title'; + nameStrategy: 'ref' | 'duplicate-ref' | 'title'; override?: ( def: ZodTypeDef, refs: Refs, diff --git a/src/_vendor/zod-to-json-schema/Refs.ts 
b/src/_vendor/zod-to-json-schema/Refs.ts index 6dad82f07..559641601 100644 --- a/src/_vendor/zod-to-json-schema/Refs.ts +++ b/src/_vendor/zod-to-json-schema/Refs.ts @@ -4,6 +4,12 @@ import { JsonSchema7Type } from './parseDef'; export type Refs = { seen: Map; + /** + * Set of all the `$ref`s we created, e.g. `Set(['#/$defs/ui'])` + * this notable does not include any `definitions` that were + * explicitly given as an option. + */ + seenRefs: Set; currentPath: string[]; propertyPath: string[] | undefined; } & Options; @@ -24,6 +30,7 @@ export const getRefs = (options?: string | Partial>): Refs => { ..._options, currentPath: currentPath, propertyPath: undefined, + seenRefs: new Set(), seen: new Map( Object.entries(_options.definitions).map(([name, def]) => [ def._def, diff --git a/src/_vendor/zod-to-json-schema/parseDef.ts b/src/_vendor/zod-to-json-schema/parseDef.ts index c22fc33eb..d37653d4e 100644 --- a/src/_vendor/zod-to-json-schema/parseDef.ts +++ b/src/_vendor/zod-to-json-schema/parseDef.ts @@ -87,6 +87,10 @@ export function parseDef( const seenSchema = get$ref(seenItem, refs); if (seenSchema !== undefined) { + if ('$ref' in seenSchema) { + refs.seenRefs.add(seenSchema.$ref); + } + return seenSchema; } } diff --git a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts index a744634be..5547c4c37 100644 --- a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts +++ b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts @@ -61,6 +61,8 @@ const zodToJsonSchema = ( main.title = title; } + const rootRefPath = name ? [...refs.basePath, refs.definitionPath, name].join('/') : null; + const combined: ReturnType> = name === undefined ? definitions ? @@ -69,6 +71,20 @@ const zodToJsonSchema = ( [refs.definitionPath]: definitions, } : main + : refs.nameStrategy === 'duplicate-ref' ? + { + ...main, + ...(definitions || refs.seenRefs.has(rootRefPath!) ? 
+ { + [refs.definitionPath]: { + ...definitions, + // only actually duplicate the schema definition if it was ever referenced + // otherwise the duplication is completely pointless + ...(refs.seenRefs.has(rootRefPath!) ? { [name]: main } : undefined), + }, + } + : undefined), + } : { $ref: [...(refs.$refStrategy === 'relative' ? [] : refs.basePath), refs.definitionPath, name].join( '/', From 8b1b4b3573717e9e807b4caef6e47209b96f19e0 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 7 Aug 2024 15:09:04 +0100 Subject: [PATCH 185/533] fix(helpers/zod): correct schema generation for recursive schemas --- src/helpers/zod.ts | 12 +- tests/lib/__snapshots__/parser.test.ts.snap | 28 +++ tests/lib/parser.test.ts | 223 ++++++++++++++++++++ 3 files changed, 259 insertions(+), 4 deletions(-) diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts index ed83d3510..a7b45268d 100644 --- a/src/helpers/zod.ts +++ b/src/helpers/zod.ts @@ -8,8 +8,12 @@ import { } from '../lib/parser'; import { zodToJsonSchema as _zodToJsonSchema } from '../_vendor/zod-to-json-schema'; -function zodToJsonSchema(schema: z.ZodType): Record { - return _zodToJsonSchema(schema, { openaiStrictMode: true }); +function zodToJsonSchema(schema: z.ZodType, options: { name: string }): Record { + return _zodToJsonSchema(schema, { + openaiStrictMode: true, + name: options.name, + nameStrategy: 'duplicate-ref', + }); } /** @@ -61,7 +65,7 @@ export function zodResponseFormat( ...props, name, strict: true, - schema: zodToJsonSchema(zodObject), + schema: zodToJsonSchema(zodObject, { name }), }, }, (content) => zodObject.parse(JSON.parse(content)), @@ -89,7 +93,7 @@ export function zodFunction(options: { type: 'function', function: { name: options.name, - parameters: zodToJsonSchema(options.parameters), + parameters: zodToJsonSchema(options.parameters, { name: options.name }), strict: true, ...(options.description ? 
{ description: options.description } : undefined), }, diff --git a/tests/lib/__snapshots__/parser.test.ts.snap b/tests/lib/__snapshots__/parser.test.ts.snap index 1eac9db47..e6f2799af 100644 --- a/tests/lib/__snapshots__/parser.test.ts.snap +++ b/tests/lib/__snapshots__/parser.test.ts.snap @@ -27,3 +27,31 @@ exports[`.parse() zod deserialises response_format 1`] = ` } " `; + +exports[`.parse() zod top-level recursive schemas 1`] = ` +"{ + "id": "chatcmpl-9taiMDrRVRIkk1Xg1yE82UjnYuZjt", + "object": "chat.completion", + "created": 1723036198, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\\"type\\":\\"form\\",\\"label\\":\\"User Profile Form\\",\\"children\\":[{\\"type\\":\\"field\\",\\"label\\":\\"Full Name\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"text\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter your full name\\"}]},{\\"type\\":\\"field\\",\\"label\\":\\"Email Address\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"email\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter your email address\\"}]},{\\"type\\":\\"field\\",\\"label\\":\\"Phone Number\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"tel\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter your phone number\\"}]},{\\"type\\":\\"button\\",\\"label\\":\\"Submit\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"submit\\"}]}],\\"attributes\\":[{\\"name\\":\\"method\\",\\"value\\":\\"post\\"},{\\"name\\":\\"action\\",\\"value\\":\\"/submit-profile\\"}]}", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 38, + "completion_tokens": 168, + "total_tokens": 206 + }, + "system_fingerprint": "fp_845eaabc1f" +} +" +`; diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index 0cd07134a..118954492 100644 --- a/tests/lib/parser.test.ts +++ 
b/tests/lib/parser.test.ts @@ -44,5 +44,228 @@ describe('.parse()', () => { } `); }); + + test('top-level recursive schemas', async () => { + const UI: any = z.lazy(() => + z.object({ + type: z.enum(['div', 'button', 'header', 'section', 'field', 'form']), + label: z.string(), + children: z.array(UI), + attributes: z.array( + z.object({ + name: z.string(), + value: z.string(), + }), + ), + }), + ); + + const completion = await makeSnapshotRequest((openai) => + openai.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'system', + content: 'You are a UI generator AI. Convert the user input into a UI.', + }, + { role: 'user', content: 'Make a User Profile Form with 3 fields' }, + ], + response_format: zodResponseFormat(UI, 'ui'), + }), + ); + + expect(completion.choices[0]?.message).toMatchInlineSnapshot(` + { + "content": "{"type":"form","label":"User Profile Form","children":[{"type":"field","label":"Full Name","children":[],"attributes":[{"name":"type","value":"text"},{"name":"placeholder","value":"Enter your full name"}]},{"type":"field","label":"Email Address","children":[],"attributes":[{"name":"type","value":"email"},{"name":"placeholder","value":"Enter your email address"}]},{"type":"field","label":"Phone Number","children":[],"attributes":[{"name":"type","value":"tel"},{"name":"placeholder","value":"Enter your phone number"}]},{"type":"button","label":"Submit","children":[],"attributes":[{"name":"type","value":"submit"}]}],"attributes":[{"name":"method","value":"post"},{"name":"action","value":"/submit-profile"}]}", + "parsed": { + "attributes": [ + { + "name": "method", + "value": "post", + }, + { + "name": "action", + "value": "/submit-profile", + }, + ], + "children": [ + { + "attributes": [ + { + "name": "type", + "value": "text", + }, + { + "name": "placeholder", + "value": "Enter your full name", + }, + ], + "children": [], + "label": "Full Name", + "type": "field", + }, + { + "attributes": [ + { + "name": "type", + 
"value": "email", + }, + { + "name": "placeholder", + "value": "Enter your email address", + }, + ], + "children": [], + "label": "Email Address", + "type": "field", + }, + { + "attributes": [ + { + "name": "type", + "value": "tel", + }, + { + "name": "placeholder", + "value": "Enter your phone number", + }, + ], + "children": [], + "label": "Phone Number", + "type": "field", + }, + { + "attributes": [ + { + "name": "type", + "value": "submit", + }, + ], + "children": [], + "label": "Submit", + "type": "button", + }, + ], + "label": "User Profile Form", + "type": "form", + }, + "refusal": null, + "role": "assistant", + "tool_calls": [], + } + `); + + expect(zodResponseFormat(UI, 'ui').json_schema).toMatchInlineSnapshot(` + { + "name": "ui", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "definitions": { + "ui": { + "additionalProperties": false, + "properties": { + "attributes": { + "items": { + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + }, + "value": { + "type": "string", + }, + }, + "required": [ + "name", + "value", + ], + "type": "object", + }, + "type": "array", + }, + "children": { + "items": { + "$ref": "#/definitions/ui", + }, + "type": "array", + }, + "label": { + "type": "string", + }, + "type": { + "enum": [ + "div", + "button", + "header", + "section", + "field", + "form", + ], + "type": "string", + }, + }, + "required": [ + "type", + "label", + "children", + "attributes", + ], + "type": "object", + }, + }, + "properties": { + "attributes": { + "items": { + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + }, + "value": { + "type": "string", + }, + }, + "required": [ + "name", + "value", + ], + "type": "object", + }, + "type": "array", + }, + "children": { + "items": { + "$ref": "#/definitions/ui", + }, + "type": "array", + }, + "label": { + "type": "string", + }, + "type": { + "enum": [ + "div", + "button", + 
"header", + "section", + "field", + "form", + ], + "type": "string", + }, + }, + "required": [ + "type", + "label", + "children", + "attributes", + ], + "type": "object", + }, + "strict": true, + } + `); + }); }); }); From fe8328f5ddc9c0aa439604fc3a7d4e193a10e116 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 7 Aug 2024 14:20:59 +0100 Subject: [PATCH 186/533] docs(examples): add UI generation example script --- examples/ui-generation.ts | 51 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 examples/ui-generation.ts diff --git a/examples/ui-generation.ts b/examples/ui-generation.ts new file mode 100644 index 000000000..84636b1f0 --- /dev/null +++ b/examples/ui-generation.ts @@ -0,0 +1,51 @@ +import OpenAI from 'openai'; +import { z } from 'zod'; +import { zodResponseFormat } from 'openai/helpers/zod'; + +const openai = new OpenAI(); + +// `z.lazy()` can't infer recursive types so we have to explicitly +// define the type ourselves here +interface UI { + type: 'div' | 'button' | 'header' | 'section' | 'field' | 'form'; + label: string; + children: Array; + attributes: { + value: string; + name: string; + }[]; +} + +const UISchema: z.ZodType = z.lazy(() => + z.object({ + type: z.enum(['div', 'button', 'header', 'section', 'field', 'form']), + label: z.string(), + children: z.array(UISchema), + attributes: z.array( + z.object({ + name: z.string(), + value: z.string(), + }), + ), + }), +); + +async function main() { + const completion = await openai.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'system', + content: 'You are a UI generator AI. 
Convert the user input into a UI.', + }, + { role: 'user', content: 'Make a User Profile Form' }, + ], + response_format: zodResponseFormat(UISchema, 'ui'), + }); + + const message = completion.choices[0]!.message; + const ui = message.parsed; + console.dir(ui, { depth: 10 }); +} + +main(); From 31e4afd6ca50e8e2560598296c099390c5956e31 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 7 Aug 2024 14:46:12 +0000 Subject: [PATCH 187/533] release: 4.55.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 20 ++++++++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 25 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 05aef7cb8..a2fc4e17b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.0" + ".": "4.55.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b667e35a6..0b24ff704 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## 4.55.1 (2024-08-07) + +Full Changelog: [v4.55.0...v4.55.1](https://github.com/openai/openai-node/compare/v4.55.0...v4.55.1) + +### Bug Fixes + +* **helpers/zod:** correct schema generation for recursive schemas ([cb54d93](https://github.com/openai/openai-node/commit/cb54d93162c86ecfd476733805a431aab25d86d6)) + + +### Chores + +* **api:** remove old `AssistantResponseFormat` type ([#967](https://github.com/openai/openai-node/issues/967)) ([9fd94bf](https://github.com/openai/openai-node/commit/9fd94bfc35128d3bc45fbf0a65e6a8d2ea4562d5)) +* **internal:** update test snapshots ([bceea60](https://github.com/openai/openai-node/commit/bceea60e461c40a9e59d52772122dd612a2ff1c4)) +* **vendor/zodJsonSchema:** add option to duplicate top-level ref ([84b8a38](https://github.com/openai/openai-node/commit/84b8a3820b0ce1c78bfd3db468d8d2962875b4ab)) + + +### Documentation + +* 
**examples:** add UI generation example script ([c75c017](https://github.com/openai/openai-node/commit/c75c017c16cbfe3fc60ea4ee5779782005e64463)) + ## 4.55.0 (2024-08-06) Full Changelog: [v4.54.0...v4.55.0](https://github.com/openai/openai-node/compare/v4.54.0...v4.55.0) diff --git a/README.md b/README.md index bc58e1dad..5d98201af 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.55.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.1/mod.ts'; ``` diff --git a/package.json b/package.json index 8c6e7ac58..923009ad9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.0", + "version": "4.55.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index a3e08548c..f50d7c45d 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 70a133b54..059a09162 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.0'; // x-release-please-version +export const VERSION = '4.55.1'; // x-release-please-version From 8e8a5c46626176bdd569624a77b80a1ad8058f17 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 8 Aug 2024 14:45:47 +0100 Subject: [PATCH 188/533] fix(helpers/zod): correct logic for adding root schema to definitions --- .../zod-to-json-schema/zodToJsonSchema.ts | 6 +- tests/lib/parser.test.ts | 179 ++++++++++++++++++ 2 files changed, 181 insertions(+), 4 deletions(-) diff --git 
a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts index 5547c4c37..1d95a98ba 100644 --- a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts +++ b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts @@ -61,8 +61,6 @@ const zodToJsonSchema = ( main.title = title; } - const rootRefPath = name ? [...refs.basePath, refs.definitionPath, name].join('/') : null; - const combined: ReturnType> = name === undefined ? definitions ? @@ -74,13 +72,13 @@ const zodToJsonSchema = ( : refs.nameStrategy === 'duplicate-ref' ? { ...main, - ...(definitions || refs.seenRefs.has(rootRefPath!) ? + ...(definitions || refs.seenRefs.size ? { [refs.definitionPath]: { ...definitions, // only actually duplicate the schema definition if it was ever referenced // otherwise the duplication is completely pointless - ...(refs.seenRefs.has(rootRefPath!) ? { [name]: main } : undefined), + ...(refs.seenRefs.size ? { [name]: main } : undefined), }, } : undefined), diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index 118954492..296787450 100644 --- a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -267,5 +267,184 @@ describe('.parse()', () => { } `); }); + + test('merged schemas', async () => { + const personSchema = z.object({ + name: z.string(), + phone_number: z.string().nullable(), + }); + + const contactPersonSchema = z.object({ + person1: personSchema.merge( + z.object({ + roles: z + .array(z.enum(['parent', 'child', 'sibling', 'spouse', 'friend', 'other'])) + .describe('Any roles for which the contact is important, use other for custom roles'), + description: z + .string() + .nullable() + .describe('Open text for any other relevant information about what the contact does.'), + }), + ), + person2: personSchema.merge( + z.object({ + differentField: z.string(), + }), + ), + }); + + expect(zodResponseFormat(contactPersonSchema, 'contactPerson').json_schema.schema) + .toMatchInlineSnapshot(` + { + "$schema": 
"http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "definitions": { + "contactPerson": { + "additionalProperties": false, + "properties": { + "person1": { + "additionalProperties": false, + "properties": { + "description": { + "description": "Open text for any other relevant information about what the contact does.", + "type": [ + "string", + "null", + ], + }, + "name": { + "type": "string", + }, + "phone_number": { + "type": [ + "string", + "null", + ], + }, + "roles": { + "description": "Any roles for which the contact is important, use other for custom roles", + "items": { + "enum": [ + "parent", + "child", + "sibling", + "spouse", + "friend", + "other", + ], + "type": "string", + }, + "type": "array", + }, + }, + "required": [ + "name", + "phone_number", + "roles", + "description", + ], + "type": "object", + }, + "person2": { + "additionalProperties": false, + "properties": { + "differentField": { + "type": "string", + }, + "name": { + "$ref": "#/definitions/contactPerson/properties/person1/properties/name", + }, + "phone_number": { + "$ref": "#/definitions/contactPerson/properties/person1/properties/phone_number", + }, + }, + "required": [ + "name", + "phone_number", + "differentField", + ], + "type": "object", + }, + }, + "required": [ + "person1", + "person2", + ], + "type": "object", + }, + }, + "properties": { + "person1": { + "additionalProperties": false, + "properties": { + "description": { + "description": "Open text for any other relevant information about what the contact does.", + "type": [ + "string", + "null", + ], + }, + "name": { + "type": "string", + }, + "phone_number": { + "type": [ + "string", + "null", + ], + }, + "roles": { + "description": "Any roles for which the contact is important, use other for custom roles", + "items": { + "enum": [ + "parent", + "child", + "sibling", + "spouse", + "friend", + "other", + ], + "type": "string", + }, + "type": "array", + }, + }, + "required": [ + "name", + 
"phone_number", + "roles", + "description", + ], + "type": "object", + }, + "person2": { + "additionalProperties": false, + "properties": { + "differentField": { + "type": "string", + }, + "name": { + "$ref": "#/definitions/contactPerson/properties/person1/properties/name", + }, + "phone_number": { + "$ref": "#/definitions/contactPerson/properties/person1/properties/phone_number", + }, + }, + "required": [ + "name", + "phone_number", + "differentField", + ], + "type": "object", + }, + }, + "required": [ + "person1", + "person2", + ], + "type": "object", + } + `); + }); }); }); From d285d4ff80b10562a5cc5ff3fbb28ce9ac4a254a Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 8 Aug 2024 15:19:23 +0100 Subject: [PATCH 189/533] fix(helpers/zod): add `extract-to-root` ref strategy --- src/_vendor/zod-to-json-schema/Options.ts | 4 +- src/_vendor/zod-to-json-schema/Refs.ts | 7 ++-- src/_vendor/zod-to-json-schema/parseDef.ts | 18 +++++++++ src/_vendor/zod-to-json-schema/util.ts | 11 ++++++ .../zod-to-json-schema/zodToJsonSchema.ts | 39 ++++++++++--------- src/helpers/zod.ts | 1 + tests/lib/parser.test.ts | 17 ++++++-- 7 files changed, 69 insertions(+), 28 deletions(-) create mode 100644 src/_vendor/zod-to-json-schema/util.ts diff --git a/src/_vendor/zod-to-json-schema/Options.ts b/src/_vendor/zod-to-json-schema/Options.ts index 9a5628846..e765eef78 100644 --- a/src/_vendor/zod-to-json-schema/Options.ts +++ b/src/_vendor/zod-to-json-schema/Options.ts @@ -10,7 +10,7 @@ export const ignoreOverride = Symbol('Let zodToJsonSchema decide on which parser export type Options = { name: string | undefined; - $refStrategy: 'root' | 'relative' | 'none' | 'seen'; + $refStrategy: 'root' | 'relative' | 'none' | 'seen' | 'extract-to-root'; basePath: string[]; effectStrategy: 'input' | 'any'; pipeStrategy: 'input' | 'output' | 'all'; @@ -20,7 +20,7 @@ export type Options = { target: Target; strictUnions: boolean; definitionPath: string; - definitions: Record; + definitions: Record; 
errorMessages: boolean; markdownDescription: boolean; patternStrategy: 'escape' | 'preserve'; diff --git a/src/_vendor/zod-to-json-schema/Refs.ts b/src/_vendor/zod-to-json-schema/Refs.ts index 559641601..ea63c076a 100644 --- a/src/_vendor/zod-to-json-schema/Refs.ts +++ b/src/_vendor/zod-to-json-schema/Refs.ts @@ -1,6 +1,7 @@ -import { ZodTypeDef } from 'zod'; +import type { ZodTypeDef } from 'zod'; import { getDefaultOptions, Options, Targets } from './Options'; import { JsonSchema7Type } from './parseDef'; +import { zodDef } from './util'; export type Refs = { seen: Map; @@ -33,9 +34,9 @@ export const getRefs = (options?: string | Partial>): Refs => { seenRefs: new Set(), seen: new Map( Object.entries(_options.definitions).map(([name, def]) => [ - def._def, + zodDef(def), { - def: def._def, + def: zodDef(def), path: [..._options.basePath, _options.definitionPath, name], // Resolution of references will be forced even though seen, so it's ok that the schema is undefined here for now. jsonSchema: undefined, diff --git a/src/_vendor/zod-to-json-schema/parseDef.ts b/src/_vendor/zod-to-json-schema/parseDef.ts index d37653d4e..a8c8e7063 100644 --- a/src/_vendor/zod-to-json-schema/parseDef.ts +++ b/src/_vendor/zod-to-json-schema/parseDef.ts @@ -122,6 +122,24 @@ const get$ref = ( switch (refs.$refStrategy) { case 'root': return { $ref: item.path.join('/') }; + // this case is needed as OpenAI strict mode doesn't support top-level `$ref`s, i.e. + // the top-level schema *must* be `{"type": "object", "properties": {...}}` but if we ever + // need to define a `$ref`, relative `$ref`s aren't supported, so we need to extract + // the schema to `#/definitions/` and reference that. + // + // e.g. 
if we need to reference a schema at + // `["#","definitions","contactPerson","properties","person1","properties","name"]` + // then we'll extract it out to `contactPerson_properties_person1_properties_name` + case 'extract-to-root': + const name = item.path.slice(refs.basePath.length + 1).join('_'); + + // we don't need to extract the root schema in this case, as it's already + // been added to the definitions + if (name !== refs.name && refs.nameStrategy === 'duplicate-ref') { + refs.definitions[name] = item.def; + } + + return { $ref: [...refs.basePath, refs.definitionPath, name].join('/') }; case 'relative': return { $ref: getRelativePath(refs.currentPath, item.path) }; case 'none': diff --git a/src/_vendor/zod-to-json-schema/util.ts b/src/_vendor/zod-to-json-schema/util.ts new file mode 100644 index 000000000..870ab47a2 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/util.ts @@ -0,0 +1,11 @@ +import type { ZodSchema, ZodTypeDef } from 'zod'; + +export const zodDef = (zodSchema: ZodSchema | ZodTypeDef): ZodTypeDef => { + return '_def' in zodSchema ? zodSchema._def : zodSchema; +}; + +export function isEmptyObj(obj: Object | null | undefined): boolean { + if (!obj) return true; + for (const _k in obj) return false; + return true; +} diff --git a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts index 1d95a98ba..2078b503f 100644 --- a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts +++ b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts @@ -2,6 +2,7 @@ import { ZodSchema } from 'zod'; import { Options, Targets } from './Options'; import { JsonSchema7Type, parseDef } from './parseDef'; import { getRefs } from './Refs'; +import { zodDef, isEmptyObj } from './util'; const zodToJsonSchema = ( schema: ZodSchema, @@ -16,25 +17,6 @@ const zodToJsonSchema = ( } => { const refs = getRefs(options); - const definitions = - typeof options === 'object' && options.definitions ? 
- Object.entries(options.definitions).reduce( - (acc, [name, schema]) => ({ - ...acc, - [name]: - parseDef( - schema._def, - { - ...refs, - currentPath: [...refs.basePath, refs.definitionPath, name], - }, - true, - ) ?? {}, - }), - {}, - ) - : undefined; - const name = typeof options === 'string' ? options : options?.nameStrategy === 'title' ? undefined @@ -61,6 +43,25 @@ const zodToJsonSchema = ( main.title = title; } + const definitions = + !isEmptyObj(refs.definitions) ? + Object.entries(refs.definitions).reduce( + (acc, [name, schema]) => ({ + ...acc, + [name]: + parseDef( + zodDef(schema), + { + ...refs, + currentPath: [...refs.basePath, refs.definitionPath, name], + }, + true, + ) ?? {}, + }), + {}, + ) + : undefined; + const combined: ReturnType> = name === undefined ? definitions ? diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts index a7b45268d..aa09ffaac 100644 --- a/src/helpers/zod.ts +++ b/src/helpers/zod.ts @@ -13,6 +13,7 @@ function zodToJsonSchema(schema: z.ZodType, options: { name: string }): Record { "type": "string", }, "name": { - "$ref": "#/definitions/contactPerson/properties/person1/properties/name", + "$ref": "#/definitions/contactPerson_properties_person1_properties_name", }, "phone_number": { - "$ref": "#/definitions/contactPerson/properties/person1/properties/phone_number", + "$ref": "#/definitions/contactPerson_properties_person1_properties_phone_number", }, }, "required": [ @@ -372,6 +372,15 @@ describe('.parse()', () => { ], "type": "object", }, + "contactPerson_properties_person1_properties_name": { + "type": "string", + }, + "contactPerson_properties_person1_properties_phone_number": { + "type": [ + "string", + "null", + ], + }, }, "properties": { "person1": { @@ -424,10 +433,10 @@ describe('.parse()', () => { "type": "string", }, "name": { - "$ref": "#/definitions/contactPerson/properties/person1/properties/name", + "$ref": "#/definitions/contactPerson_properties_person1_properties_name", }, "phone_number": { - "$ref": 
"#/definitions/contactPerson/properties/person1/properties/phone_number", + "$ref": "#/definitions/contactPerson_properties_person1_properties_phone_number", }, }, "required": [ From 822170690601f16a6c94db07930bafb0061711e9 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 8 Aug 2024 15:19:38 +0100 Subject: [PATCH 190/533] chore(internal): add README for vendored zod-to-json-schema --- src/_vendor/zod-to-json-schema/README.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 src/_vendor/zod-to-json-schema/README.md diff --git a/src/_vendor/zod-to-json-schema/README.md b/src/_vendor/zod-to-json-schema/README.md new file mode 100644 index 000000000..ffb351242 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/README.md @@ -0,0 +1,3 @@ +# Zod to Json Schema + +Vendored version of https://github.com/StefanTerdell/zod-to-json-schema that has been updated to generate JSON Schemas that are compatible with OpenAI's [strict mode](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas) From fc22483173b521b0b194264e6d975c3517c06216 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 8 Aug 2024 15:25:59 +0100 Subject: [PATCH 191/533] fix(helpers/zod): add `nullableStrategy` option so we can generate `nullable: true` instead of `type: ["foo", "null"]` and avoid having to change the `target` json schema version as we're in a weird in the middle state --- src/_vendor/zod-to-json-schema/Options.ts | 2 ++ .../zod-to-json-schema/parsers/nullable.ts | 2 +- src/helpers/zod.ts | 1 + tests/lib/parser.test.ts | 30 +++++++------------ 4 files changed, 14 insertions(+), 21 deletions(-) diff --git a/src/_vendor/zod-to-json-schema/Options.ts b/src/_vendor/zod-to-json-schema/Options.ts index e765eef78..a83690e59 100644 --- a/src/_vendor/zod-to-json-schema/Options.ts +++ b/src/_vendor/zod-to-json-schema/Options.ts @@ -17,6 +17,7 @@ export type Options = { dateStrategy: DateStrategy | DateStrategy[]; mapStrategy: 'entries' | 'record'; 
removeAdditionalStrategy: 'passthrough' | 'strict'; + nullableStrategy: 'from-target' | 'property'; target: Target; strictUnions: boolean; definitionPath: string; @@ -45,6 +46,7 @@ export const defaultOptions: Options = { pipeStrategy: 'all', dateStrategy: 'format:date-time', mapStrategy: 'entries', + nullableStrategy: 'from-target', removeAdditionalStrategy: 'passthrough', definitionPath: 'definitions', target: 'jsonSchema7', diff --git a/src/_vendor/zod-to-json-schema/parsers/nullable.ts b/src/_vendor/zod-to-json-schema/parsers/nullable.ts index efb70076e..0d7063610 100644 --- a/src/_vendor/zod-to-json-schema/parsers/nullable.ts +++ b/src/_vendor/zod-to-json-schema/parsers/nullable.ts @@ -17,7 +17,7 @@ export function parseNullableDef(def: ZodNullableDef, refs: Refs): JsonSchema7Nu ['ZodString', 'ZodNumber', 'ZodBigInt', 'ZodBoolean', 'ZodNull'].includes(def.innerType._def.typeName) && (!def.innerType._def.checks || !def.innerType._def.checks.length) ) { - if (refs.target === 'openApi3') { + if (refs.target === 'openApi3' || refs.nullableStrategy === 'property') { return { type: primitiveMappings[def.innerType._def.typeName as keyof typeof primitiveMappings], nullable: true, diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts index aa09ffaac..1946b2199 100644 --- a/src/helpers/zod.ts +++ b/src/helpers/zod.ts @@ -14,6 +14,7 @@ function zodToJsonSchema(schema: z.ZodType, options: { name: string }): Record { "properties": { "description": { "description": "Open text for any other relevant information about what the contact does.", - "type": [ - "string", - "null", - ], + "nullable": true, + "type": "string", }, "name": { "type": "string", }, "phone_number": { - "type": [ - "string", - "null", - ], + "nullable": true, + "type": "string", }, "roles": { "description": "Any roles for which the contact is important, use other for custom roles", @@ -376,10 +372,8 @@ describe('.parse()', () => { "type": "string", }, 
"contactPerson_properties_person1_properties_phone_number": { - "type": [ - "string", - "null", - ], + "nullable": true, + "type": "string", }, }, "properties": { @@ -388,19 +382,15 @@ describe('.parse()', () => { "properties": { "description": { "description": "Open text for any other relevant information about what the contact does.", - "type": [ - "string", - "null", - ], + "nullable": true, + "type": "string", }, "name": { "type": "string", }, "phone_number": { - "type": [ - "string", - "null", - ], + "nullable": true, + "type": "string", }, "roles": { "description": "Any roles for which the contact is important, use other for custom roles", From 89ace3da146a6eb0293dc8c54731bdd489760ff0 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 8 Aug 2024 15:28:51 +0100 Subject: [PATCH 192/533] chore(tests): add more API request tests --- tests/lib/__snapshots__/parser.test.ts.snap | 28 +++++++++++++ tests/lib/parser.test.ts | 44 +++++++++++++++++++++ tests/utils/mock-snapshots.ts | 7 +++- 3 files changed, 77 insertions(+), 2 deletions(-) diff --git a/tests/lib/__snapshots__/parser.test.ts.snap b/tests/lib/__snapshots__/parser.test.ts.snap index e6f2799af..715c268ff 100644 --- a/tests/lib/__snapshots__/parser.test.ts.snap +++ b/tests/lib/__snapshots__/parser.test.ts.snap @@ -28,6 +28,34 @@ exports[`.parse() zod deserialises response_format 1`] = ` " `; +exports[`.parse() zod merged schemas 2`] = ` +"{ + "id": "chatcmpl-9tyPgktyF5JgREIZd0XZI4XgrBAD2", + "object": "chat.completion", + "created": 1723127296, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\\"person1\\":{\\"name\\":\\"Jane Doe\\",\\"phone_number\\":\\"+1234567890\\",\\"roles\\":[\\"other\\"],\\"description\\":\\"Engineer at OpenAI. Email: jane@openai.com\\"},\\"person2\\":{\\"name\\":\\"John Smith\\",\\"phone_number\\":\\"+0987654321\\",\\"differentField\\":\\"Engineer at OpenAI. 
Email: john@openai.com\\"}}", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 61, + "completion_tokens": 72, + "total_tokens": 133 + }, + "system_fingerprint": "fp_845eaabc1f" +} +" +`; + exports[`.parse() zod top-level recursive schemas 1`] = ` "{ "id": "chatcmpl-9taiMDrRVRIkk1Xg1yE82UjnYuZjt", diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index 9668cf7d0..3fb3c948a 100644 --- a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -444,6 +444,50 @@ describe('.parse()', () => { "type": "object", } `); + + const completion = await makeSnapshotRequest( + (openai) => + openai.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'system', + content: 'You are a helpful assistant.', + }, + { + role: 'user', + content: + 'jane doe, born nov 16, engineer at openai, jane@openai.com. john smith, born march 1, enigneer at openai, john@openai.com', + }, + ], + response_format: zodResponseFormat(contactPersonSchema, 'contactPerson'), + }), + 2, + ); + + expect(completion.choices[0]?.message).toMatchInlineSnapshot(` + { + "content": "{"person1":{"name":"Jane Doe","phone_number":"+1234567890","roles":["other"],"description":"Engineer at OpenAI. Email: jane@openai.com"},"person2":{"name":"John Smith","phone_number":"+0987654321","differentField":"Engineer at OpenAI. Email: john@openai.com"}}", + "parsed": { + "person1": { + "description": "Engineer at OpenAI. Email: jane@openai.com", + "name": "Jane Doe", + "phone_number": "+1234567890", + "roles": [ + "other", + ], + }, + "person2": { + "differentField": "Engineer at OpenAI. 
Email: john@openai.com", + "name": "John Smith", + "phone_number": "+0987654321", + }, + }, + "refusal": null, + "role": "assistant", + "tool_calls": [], + } + `); }); }); }); diff --git a/tests/utils/mock-snapshots.ts b/tests/utils/mock-snapshots.ts index 2bf09eda7..317bf6b0f 100644 --- a/tests/utils/mock-snapshots.ts +++ b/tests/utils/mock-snapshots.ts @@ -5,7 +5,10 @@ import { RequestInfo } from 'openai/_shims/auto/types'; import { mockFetch } from './mock-fetch'; import { Readable } from 'stream'; -export async function makeSnapshotRequest(requestFn: (client: OpenAI) => Promise): Promise { +export async function makeSnapshotRequest( + requestFn: (client: OpenAI) => Promise, + snapshotIndex = 1, +): Promise { if (process.env['UPDATE_API_SNAPSHOTS'] === '1') { var capturedResponseContent: string | null = null; @@ -27,7 +30,7 @@ export async function makeSnapshotRequest(requestFn: (client: OpenAI) => Prom return result; } - const qualifiedSnapshotName = `${expect.getState().currentTestName} 1`; + const qualifiedSnapshotName = [expect.getState().currentTestName, snapshotIndex].join(' '); const snapshotState = expect.getState()['snapshotState']; (snapshotState._uncheckedKeys as Set).delete(qualifiedSnapshotName); From f3b63cce927414621a705f9de479b8bfc0e35c94 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 19:56:03 +0000 Subject: [PATCH 193/533] release: 4.55.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 16 ++++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 21 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a2fc4e17b..f48cee097 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.1" + ".": "4.55.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b24ff704..a1e89d8f8 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog +## 4.55.2 (2024-08-08) + +Full Changelog: [v4.55.1...v4.55.2](https://github.com/openai/openai-node/compare/v4.55.1...v4.55.2) + +### Bug Fixes + +* **helpers/zod:** add `extract-to-root` ref strategy ([ef3c73c](https://github.com/openai/openai-node/commit/ef3c73cfdf1a8e45346417812168e476fea65690)) +* **helpers/zod:** add `nullableStrategy` option ([ad89892](https://github.com/openai/openai-node/commit/ad89892f4ac0daba161ce97267a165a12f67c341)) +* **helpers/zod:** correct logic for adding root schema to definitions ([e4a247a](https://github.com/openai/openai-node/commit/e4a247a2a87b4d3bde55891b31e07413d3a9f00d)) + + +### Chores + +* **internal:** add README for vendored zod-to-json-schema ([d8a80a9](https://github.com/openai/openai-node/commit/d8a80a915dfe723a59f512e7128aecf857324388)) +* **tests:** add more API request tests ([04c1590](https://github.com/openai/openai-node/commit/04c1590a64127c43898c3c88bcbdd624d54008f6)) + ## 4.55.1 (2024-08-07) Full Changelog: [v4.55.0...v4.55.1](https://github.com/openai/openai-node/compare/v4.55.0...v4.55.1) diff --git a/README.md b/README.md index 5d98201af..c9bb46a73 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.55.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.2/mod.ts'; ``` diff --git a/package.json b/package.json index 923009ad9..b62736b5b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.1", + "version": "4.55.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index f50d7c45d..8aebc9cb4 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts 
-import OpenAI from "https://deno.land/x/openai@v4.55.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 059a09162..163a11feb 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.1'; // x-release-please-version +export const VERSION = '4.55.2'; // x-release-please-version From 985428b600023a07d2a29825e4b495bd34fe36c0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 20:21:06 +0000 Subject: [PATCH 194/533] chore(internal): updates (#975) --- .stats.yml | 2 +- package.json | 2 +- scripts/format | 2 +- scripts/lint | 2 +- src/resources/chat/chat.ts | 2 +- src/resources/chat/completions.ts | 5 +++++ 6 files changed, 10 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index ac652c927..cad2c64cd 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4097c2f86beb3f3bb021775cd1dfa240e960caf842aeefc2e08da4dc0851ea79.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-97797a9363b9960b5f2fbdc84426a2b91e75533ecd409fe99e37c231180a4339.yml diff --git a/package.json b/package.json index b62736b5b..003877102 100644 --- a/package.json +++ b/package.json @@ -21,7 +21,7 @@ "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build; fi", "tsn": "ts-node -r tsconfig-paths/register", "lint": "./scripts/lint", - "fix": "eslint --fix --ext ts,js ." + "fix": "./scripts/format" }, "dependencies": { "@types/node": "^18.11.18", diff --git a/scripts/format b/scripts/format index d297e762f..a6bb9d03a 100755 --- a/scripts/format +++ b/scripts/format @@ -5,4 +5,4 @@ set -e cd "$(dirname "$0")/.." 
echo "==> Running eslint --fix" -./node_modules/.bin/eslint --fix --ext ts,js . +ESLINT_USE_FLAT_CONFIG="false" ./node_modules/.bin/eslint --fix --ext ts,js . diff --git a/scripts/lint b/scripts/lint index 6b0e5dc3e..4af1de013 100755 --- a/scripts/lint +++ b/scripts/lint @@ -5,4 +5,4 @@ set -e cd "$(dirname "$0")/.." echo "==> Running eslint" -./node_modules/.bin/eslint --ext ts,js . +ESLINT_USE_FLAT_CONFIG="false" ./node_modules/.bin/eslint --ext ts,js . diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 031b4059b..684b1307a 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -10,8 +10,8 @@ export class Chat extends APIResource { export type ChatModel = | 'gpt-4o' - | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' + | 'gpt-4o-2024-08-06' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 91d7da801..26e7490e0 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -829,6 +829,11 @@ export interface ChatCompletionCreateParamsBase { * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which guarantees the model will match your supplied JSON schema. Learn + * more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the * message the model generates is valid JSON. 
* From cc13af9fa7e76e774d3132bba2427bb0176bd622 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 20:21:32 +0000 Subject: [PATCH 195/533] release: 4.55.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f48cee097..d34e87f46 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.2" + ".": "4.55.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index a1e89d8f8..fefb710b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.55.3 (2024-08-08) + +Full Changelog: [v4.55.2...v4.55.3](https://github.com/openai/openai-node/compare/v4.55.2...v4.55.3) + +### Chores + +* **internal:** updates ([#975](https://github.com/openai/openai-node/issues/975)) ([313a190](https://github.com/openai/openai-node/commit/313a19059a61893887ac0b57bb488c24bc40f099)) + ## 4.55.2 (2024-08-08) Full Changelog: [v4.55.1...v4.55.2](https://github.com/openai/openai-node/compare/v4.55.1...v4.55.2) diff --git a/README.md b/README.md index c9bb46a73..13f449a29 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.55.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.3/mod.ts'; ``` diff --git a/package.json b/package.json index 003877102..31f5bc16a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.2", + "version": "4.55.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 8aebc9cb4..cd4cbd4f5 100755 --- a/scripts/build-deno +++ 
b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 163a11feb..ec9119c18 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.2'; // x-release-please-version +export const VERSION = '4.55.3'; // x-release-please-version From f4f8c5a46bd03fe63b7e9f95d12766d2b8de9484 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 16:32:31 +0000 Subject: [PATCH 196/533] chore(ci): codeowners file (#980) --- .github/CODEOWNERS | 3 +++ .github/workflows/create-releases.yml | 1 + .github/workflows/release-doctor.yml | 1 + bin/check-release-environment | 1 + 4 files changed, 6 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3ce5f8d00..d58c8454c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1,4 @@ +# This file is used to automatically assign reviewers to PRs +# For more information see: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners + * @openai/sdks-team diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index d6d802e16..d5ae1f755 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -62,3 +62,4 @@ jobs: env: DENO_PUSH_REMOTE_URL: https://username:${{ steps.generate_token.outputs.token }}@github.com/openai/openai-deno-build.git DENO_PUSH_BRANCH: main + diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 3bb1d714f..37bc09e80 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -21,3 +21,4 @@ jobs: env: 
STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} + diff --git a/bin/check-release-environment b/bin/check-release-environment index 9651d95c8..dbfd546bf 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -23,3 +23,4 @@ if [[ lenErrors -gt 0 ]]; then fi echo "The environment is ready to push releases!" + From 9b7568cad022167150eef4e534124d007738bebb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 17:43:24 +0000 Subject: [PATCH 197/533] chore(ci): bump prism mock server version (#982) --- scripts/mock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock b/scripts/mock index f58615769..d2814ae6a 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stainless-api/prism-cli@5.8.4 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" fi From 510c866863018091e0a7e4d80c47d76ec0771d46 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 9 Aug 2024 19:49:33 +0100 Subject: [PATCH 198/533] fix(helpers/zod): nested union schema extraction (#979) --- .../zod-to-json-schema/zodToJsonSchema.ts | 36 +- tests/lib/__snapshots__/parser.test.ts.snap | 52 ++- tests/lib/parser.test.ts | 346 ++++++++++++++++-- 3 files changed, 378 insertions(+), 56 deletions(-) diff --git a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts index 2078b503f..1c3290008 
100644 --- a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts +++ b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts @@ -43,24 +43,24 @@ const zodToJsonSchema = ( main.title = title; } - const definitions = - !isEmptyObj(refs.definitions) ? - Object.entries(refs.definitions).reduce( - (acc, [name, schema]) => ({ - ...acc, - [name]: - parseDef( - zodDef(schema), - { - ...refs, - currentPath: [...refs.basePath, refs.definitionPath, name], - }, - true, - ) ?? {}, - }), - {}, - ) - : undefined; + const definitions = (() => { + if (isEmptyObj(refs.definitions)) { + return undefined; + } + + const definitions: Record = {}; + + for (const [name, zodSchema] of Object.entries(refs.definitions)) { + definitions[name] = + parseDef( + zodDef(zodSchema), + { ...refs, currentPath: [...refs.basePath, refs.definitionPath, name] }, + true, + ) ?? {}; + } + + return definitions; + })(); const combined: ReturnType> = name === undefined ? diff --git a/tests/lib/__snapshots__/parser.test.ts.snap b/tests/lib/__snapshots__/parser.test.ts.snap index 715c268ff..d98db2345 100644 --- a/tests/lib/__snapshots__/parser.test.ts.snap +++ b/tests/lib/__snapshots__/parser.test.ts.snap @@ -2,16 +2,16 @@ exports[`.parse() zod deserialises response_format 1`] = ` "{ - "id": "chatcmpl-9tZXFjiGKgtrHZeIxvkklWe51DYZp", + "id": "chatcmpl-9uLhvwLPvKOZoJ7hwaa666fYuxYif", "object": "chat.completion", - "created": 1723031665, + "created": 1723216839, "model": "gpt-4o-2024-08-06", "choices": [ { "index": 0, "message": { "role": "assistant", - "content": "{\\"city\\":\\"San Francisco\\",\\"units\\":\\"f\\"}", + "content": "{\\"city\\":\\"San Francisco\\",\\"units\\":\\"c\\"}", "refusal": null }, "logprobs": null, @@ -30,16 +30,16 @@ exports[`.parse() zod deserialises response_format 1`] = ` exports[`.parse() zod merged schemas 2`] = ` "{ - "id": "chatcmpl-9tyPgktyF5JgREIZd0XZI4XgrBAD2", + "id": "chatcmpl-9uLi0HJ6HYH0FM1VI1N6XCREiGvX1", "object": "chat.completion", - "created": 1723127296, + "created": 
1723216844, "model": "gpt-4o-2024-08-06", "choices": [ { "index": 0, "message": { "role": "assistant", - "content": "{\\"person1\\":{\\"name\\":\\"Jane Doe\\",\\"phone_number\\":\\"+1234567890\\",\\"roles\\":[\\"other\\"],\\"description\\":\\"Engineer at OpenAI. Email: jane@openai.com\\"},\\"person2\\":{\\"name\\":\\"John Smith\\",\\"phone_number\\":\\"+0987654321\\",\\"differentField\\":\\"Engineer at OpenAI. Email: john@openai.com\\"}}", + "content": "{\\"person1\\":{\\"name\\":\\"Jane Doe\\",\\"phone_number\\":\\".\\",\\"roles\\":[\\"other\\"],\\"description\\":\\"Engineer at OpenAI, born Nov 16, contact email: jane@openai.com\\"},\\"person2\\":{\\"name\\":\\"John Smith\\",\\"phone_number\\":\\"john@openai.com\\",\\"differentField\\":\\"Engineer at OpenAI, born March 1.\\"}}", "refusal": null }, "logprobs": null, @@ -51,23 +51,51 @@ exports[`.parse() zod merged schemas 2`] = ` "completion_tokens": 72, "total_tokens": 133 }, - "system_fingerprint": "fp_845eaabc1f" + "system_fingerprint": "fp_2a322c9ffc" +} +" +`; + +exports[`.parse() zod nested schema extraction 2`] = ` +"{ + "id": "chatcmpl-9uLi6hkH6VcoaYiNEzy3h56QRAyns", + "object": "chat.completion", + "created": 1723216850, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\\"name\\":\\"TodoApp\\",\\"fields\\":[{\\"type\\":\\"string\\",\\"name\\":\\"taskId\\",\\"metadata\\":{\\"foo\\":\\"unique identifier for each task\\"}},{\\"type\\":\\"string\\",\\"name\\":\\"title\\",\\"metadata\\":{\\"foo\\":\\"title of the task\\"}},{\\"type\\":\\"string\\",\\"name\\":\\"description\\",\\"metadata\\":{\\"foo\\":\\"detailed description of the task. 
This is optional.\\"}},{\\"type\\":\\"string\\",\\"name\\":\\"status\\",\\"metadata\\":{\\"foo\\":\\"status of the task, e.g., pending, completed, etc.\\"}},{\\"type\\":\\"string\\",\\"name\\":\\"dueDate\\",\\"metadata\\":null},{\\"type\\":\\"string\\",\\"name\\":\\"priority\\",\\"metadata\\":{\\"foo\\":\\"priority level of the task, e.g., low, medium, high\\"}},{\\"type\\":\\"string\\",\\"name\\":\\"creationDate\\",\\"metadata\\":{\\"foo\\":\\"date when the task was created\\"}},{\\"type\\":\\"string\\",\\"name\\":\\"lastModifiedDate\\",\\"metadata\\":{\\"foo\\":\\"date when the task was last modified\\"}},{\\"type\\":\\"string\\",\\"name\\":\\"tags\\",\\"metadata\\":{\\"foo\\":\\"tags associated with the task, for categorization\\"}}]}", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 36, + "completion_tokens": 208, + "total_tokens": 244 + }, + "system_fingerprint": "fp_2a322c9ffc" } " `; exports[`.parse() zod top-level recursive schemas 1`] = ` "{ - "id": "chatcmpl-9taiMDrRVRIkk1Xg1yE82UjnYuZjt", + "id": "chatcmpl-9uLhw79ArBF4KsQQOlsoE68m6vh6v", "object": "chat.completion", - "created": 1723036198, + "created": 1723216840, "model": "gpt-4o-2024-08-06", "choices": [ { "index": 0, "message": { "role": "assistant", - "content": "{\\"type\\":\\"form\\",\\"label\\":\\"User Profile Form\\",\\"children\\":[{\\"type\\":\\"field\\",\\"label\\":\\"Full Name\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"text\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter your full name\\"}]},{\\"type\\":\\"field\\",\\"label\\":\\"Email Address\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"email\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter your email address\\"}]},{\\"type\\":\\"field\\",\\"label\\":\\"Phone Number\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"tel\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter 
your phone number\\"}]},{\\"type\\":\\"button\\",\\"label\\":\\"Submit\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"submit\\"}]}],\\"attributes\\":[{\\"name\\":\\"method\\",\\"value\\":\\"post\\"},{\\"name\\":\\"action\\",\\"value\\":\\"/submit-profile\\"}]}", + "content": "{\\"type\\":\\"form\\",\\"label\\":\\"User Profile Form\\",\\"children\\":[{\\"type\\":\\"field\\",\\"label\\":\\"First Name\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"text\\"},{\\"name\\":\\"name\\",\\"value\\":\\"firstName\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter your first name\\"}]},{\\"type\\":\\"field\\",\\"label\\":\\"Last Name\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"text\\"},{\\"name\\":\\"name\\",\\"value\\":\\"lastName\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter your last name\\"}]},{\\"type\\":\\"field\\",\\"label\\":\\"Email Address\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"email\\"},{\\"name\\":\\"name\\",\\"value\\":\\"email\\"},{\\"name\\":\\"placeholder\\",\\"value\\":\\"Enter your email address\\"}]},{\\"type\\":\\"button\\",\\"label\\":\\"Submit\\",\\"children\\":[],\\"attributes\\":[{\\"name\\":\\"type\\",\\"value\\":\\"submit\\"}]}],\\"attributes\\":[]}", "refusal": null }, "logprobs": null, @@ -76,8 +104,8 @@ exports[`.parse() zod top-level recursive schemas 1`] = ` ], "usage": { "prompt_tokens": 38, - "completion_tokens": 168, - "total_tokens": 206 + "completion_tokens": 175, + "total_tokens": 213 }, "system_fingerprint": "fp_845eaabc1f" } diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index 3fb3c948a..331b16895 100644 --- a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -32,10 +32,10 @@ describe('.parse()', () => { "index": 0, "logprobs": null, "message": { - "content": "{"city":"San Francisco","units":"f"}", + "content": "{"city":"San Francisco","units":"c"}", "parsed": { "city": 
"San Francisco", - "units": "f", + "units": "c", }, "refusal": null, "role": "assistant", @@ -76,18 +76,9 @@ describe('.parse()', () => { expect(completion.choices[0]?.message).toMatchInlineSnapshot(` { - "content": "{"type":"form","label":"User Profile Form","children":[{"type":"field","label":"Full Name","children":[],"attributes":[{"name":"type","value":"text"},{"name":"placeholder","value":"Enter your full name"}]},{"type":"field","label":"Email Address","children":[],"attributes":[{"name":"type","value":"email"},{"name":"placeholder","value":"Enter your email address"}]},{"type":"field","label":"Phone Number","children":[],"attributes":[{"name":"type","value":"tel"},{"name":"placeholder","value":"Enter your phone number"}]},{"type":"button","label":"Submit","children":[],"attributes":[{"name":"type","value":"submit"}]}],"attributes":[{"name":"method","value":"post"},{"name":"action","value":"/submit-profile"}]}", + "content": "{"type":"form","label":"User Profile Form","children":[{"type":"field","label":"First Name","children":[],"attributes":[{"name":"type","value":"text"},{"name":"name","value":"firstName"},{"name":"placeholder","value":"Enter your first name"}]},{"type":"field","label":"Last Name","children":[],"attributes":[{"name":"type","value":"text"},{"name":"name","value":"lastName"},{"name":"placeholder","value":"Enter your last name"}]},{"type":"field","label":"Email Address","children":[],"attributes":[{"name":"type","value":"email"},{"name":"name","value":"email"},{"name":"placeholder","value":"Enter your email address"}]},{"type":"button","label":"Submit","children":[],"attributes":[{"name":"type","value":"submit"}]}],"attributes":[]}", "parsed": { - "attributes": [ - { - "name": "method", - "value": "post", - }, - { - "name": "action", - "value": "/submit-profile", - }, - ], + "attributes": [], "children": [ { "attributes": [ @@ -95,43 +86,55 @@ describe('.parse()', () => { "name": "type", "value": "text", }, + { + "name": "name", + "value": 
"firstName", + }, { "name": "placeholder", - "value": "Enter your full name", + "value": "Enter your first name", }, ], "children": [], - "label": "Full Name", + "label": "First Name", "type": "field", }, { "attributes": [ { "name": "type", - "value": "email", + "value": "text", + }, + { + "name": "name", + "value": "lastName", }, { "name": "placeholder", - "value": "Enter your email address", + "value": "Enter your last name", }, ], "children": [], - "label": "Email Address", + "label": "Last Name", "type": "field", }, { "attributes": [ { "name": "type", - "value": "tel", + "value": "email", + }, + { + "name": "name", + "value": "email", }, { "name": "placeholder", - "value": "Enter your phone number", + "value": "Enter your email address", }, ], "children": [], - "label": "Phone Number", + "label": "Email Address", "type": "field", }, { @@ -467,22 +470,313 @@ describe('.parse()', () => { expect(completion.choices[0]?.message).toMatchInlineSnapshot(` { - "content": "{"person1":{"name":"Jane Doe","phone_number":"+1234567890","roles":["other"],"description":"Engineer at OpenAI. Email: jane@openai.com"},"person2":{"name":"John Smith","phone_number":"+0987654321","differentField":"Engineer at OpenAI. Email: john@openai.com"}}", + "content": "{"person1":{"name":"Jane Doe","phone_number":".","roles":["other"],"description":"Engineer at OpenAI, born Nov 16, contact email: jane@openai.com"},"person2":{"name":"John Smith","phone_number":"john@openai.com","differentField":"Engineer at OpenAI, born March 1."}}", "parsed": { "person1": { - "description": "Engineer at OpenAI. Email: jane@openai.com", + "description": "Engineer at OpenAI, born Nov 16, contact email: jane@openai.com", "name": "Jane Doe", - "phone_number": "+1234567890", + "phone_number": ".", "roles": [ "other", ], }, "person2": { - "differentField": "Engineer at OpenAI. 
Email: john@openai.com", + "differentField": "Engineer at OpenAI, born March 1.", "name": "John Smith", - "phone_number": "+0987654321", + "phone_number": "john@openai.com", + }, + }, + "refusal": null, + "role": "assistant", + "tool_calls": [], + } + `); + }); + + test('nested schema extraction', async () => { + // optional object that can be on each field, mark it as nullable to comply with structured output restrictions + const metadata = z.nullable( + z.object({ + foo: z.string(), + }), + ); + + // union element a + const fieldA = z.object({ + type: z.literal('string'), + name: z.string(), + metadata, + }); + + // union element b, both referring to above nullable object + const fieldB = z.object({ + type: z.literal('number'), + metadata, + }); + + // top level input object with array of union element + const model = z.object({ + name: z.string(), + fields: z.array(z.union([fieldA, fieldB])), + }); + + expect(zodResponseFormat(model, 'query').json_schema.schema).toMatchInlineSnapshot(` + { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "definitions": { + "contactPerson_properties_person1_properties_name": { + "type": "string", + }, + "contactPerson_properties_person1_properties_phone_number": { + "nullable": true, + "type": "string", + }, + "query": { + "additionalProperties": false, + "properties": { + "fields": { + "items": { + "anyOf": [ + { + "additionalProperties": false, + "properties": { + "metadata": { + "anyOf": [ + { + "additionalProperties": false, + "properties": { + "foo": { + "type": "string", + }, + }, + "required": [ + "foo", + ], + "type": "object", + }, + { + "type": "null", + }, + ], + }, + "name": { + "type": "string", + }, + "type": { + "const": "string", + "type": "string", + }, + }, + "required": [ + "type", + "name", + "metadata", + ], + "type": "object", + }, + { + "additionalProperties": false, + "properties": { + "metadata": { + "$ref": 
"#/definitions/query_properties_fields_items_anyOf_0_properties_metadata", + }, + "type": { + "const": "number", + "type": "string", + }, + }, + "required": [ + "type", + "metadata", + ], + "type": "object", + }, + ], + }, + "type": "array", + }, + "name": { + "type": "string", + }, + }, + "required": [ + "name", + "fields", + ], + "type": "object", + }, + "query_properties_fields_items_anyOf_0_properties_metadata": { + "anyOf": [ + { + "$ref": "#/definitions/query_properties_fields_items_anyOf_0_properties_metadata_anyOf_0", + }, + { + "type": "null", + }, + ], + }, + }, + "properties": { + "fields": { + "items": { + "anyOf": [ + { + "additionalProperties": false, + "properties": { + "metadata": { + "anyOf": [ + { + "additionalProperties": false, + "properties": { + "foo": { + "type": "string", + }, + }, + "required": [ + "foo", + ], + "type": "object", + }, + { + "type": "null", + }, + ], + }, + "name": { + "type": "string", + }, + "type": { + "const": "string", + "type": "string", + }, + }, + "required": [ + "type", + "name", + "metadata", + ], + "type": "object", + }, + { + "additionalProperties": false, + "properties": { + "metadata": { + "$ref": "#/definitions/query_properties_fields_items_anyOf_0_properties_metadata", + }, + "type": { + "const": "number", + "type": "string", + }, + }, + "required": [ + "type", + "metadata", + ], + "type": "object", + }, + ], + }, + "type": "array", + }, + "name": { + "type": "string", }, }, + "required": [ + "name", + "fields", + ], + "type": "object", + } + `); + + const completion = await makeSnapshotRequest( + (openai) => + openai.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'system', + content: + "You are a helpful assistant. 
Generate a data model according to the user's instructions.", + }, + { role: 'user', content: 'create a todo app data model' }, + ], + response_format: zodResponseFormat(model, 'query'), + }), + 2, + ); + + expect(completion.choices[0]?.message).toMatchInlineSnapshot(` + { + "content": "{"name":"TodoApp","fields":[{"type":"string","name":"taskId","metadata":{"foo":"unique identifier for each task"}},{"type":"string","name":"title","metadata":{"foo":"title of the task"}},{"type":"string","name":"description","metadata":{"foo":"detailed description of the task. This is optional."}},{"type":"string","name":"status","metadata":{"foo":"status of the task, e.g., pending, completed, etc."}},{"type":"string","name":"dueDate","metadata":null},{"type":"string","name":"priority","metadata":{"foo":"priority level of the task, e.g., low, medium, high"}},{"type":"string","name":"creationDate","metadata":{"foo":"date when the task was created"}},{"type":"string","name":"lastModifiedDate","metadata":{"foo":"date when the task was last modified"}},{"type":"string","name":"tags","metadata":{"foo":"tags associated with the task, for categorization"}}]}", + "parsed": { + "fields": [ + { + "metadata": { + "foo": "unique identifier for each task", + }, + "name": "taskId", + "type": "string", + }, + { + "metadata": { + "foo": "title of the task", + }, + "name": "title", + "type": "string", + }, + { + "metadata": { + "foo": "detailed description of the task. 
This is optional.", + }, + "name": "description", + "type": "string", + }, + { + "metadata": { + "foo": "status of the task, e.g., pending, completed, etc.", + }, + "name": "status", + "type": "string", + }, + { + "metadata": null, + "name": "dueDate", + "type": "string", + }, + { + "metadata": { + "foo": "priority level of the task, e.g., low, medium, high", + }, + "name": "priority", + "type": "string", + }, + { + "metadata": { + "foo": "date when the task was created", + }, + "name": "creationDate", + "type": "string", + }, + { + "metadata": { + "foo": "date when the task was last modified", + }, + "name": "lastModifiedDate", + "type": "string", + }, + { + "metadata": { + "foo": "tags associated with the task, for categorization", + }, + "name": "tags", + "type": "string", + }, + ], + "name": "TodoApp", + }, "refusal": null, "role": "assistant", "tool_calls": [], From 925a93062924f11759339b5f9318175c6c68e4cf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 18:49:52 +0000 Subject: [PATCH 199/533] release: 4.55.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d34e87f46..04004d7d1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.3" + ".": "4.55.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index fefb710b2..d43fd4772 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.55.4 (2024-08-09) + +Full Changelog: [v4.55.3...v4.55.4](https://github.com/openai/openai-node/compare/v4.55.3...v4.55.4) + +### Bug Fixes + +* **helpers/zod:** nested union schema extraction ([#979](https://github.com/openai/openai-node/issues/979)) 
([31b05aa](https://github.com/openai/openai-node/commit/31b05aa6fa0445141ae17a1b1eff533b83735f3a)) + + +### Chores + +* **ci:** bump prism mock server version ([#982](https://github.com/openai/openai-node/issues/982)) ([7442643](https://github.com/openai/openai-node/commit/7442643e8445eea15da54843a7c9d7580a402979)) +* **ci:** codeowners file ([#980](https://github.com/openai/openai-node/issues/980)) ([17a42b2](https://github.com/openai/openai-node/commit/17a42b2f6e2de2dce338358a48f6d7d4ed723f6f)) + ## 4.55.3 (2024-08-08) Full Changelog: [v4.55.2...v4.55.3](https://github.com/openai/openai-node/compare/v4.55.2...v4.55.3) diff --git a/README.md b/README.md index 13f449a29..50f7f09e2 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.55.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.4/mod.ts'; ``` diff --git a/package.json b/package.json index 31f5bc16a..9110c179b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.3", + "version": "4.55.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index cd4cbd4f5..79fd58ea4 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.4/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index ec9119c18..f4a031b0b 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.3'; // x-release-please-version +export const VERSION = '4.55.4'; // x-release-please-version From 7e993db2ee1bd00ab556638c4f75949a53b56abb Mon Sep 17 
00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 19:47:41 +0000 Subject: [PATCH 200/533] chore(examples): minor formatting changes (#987) --- tests/api-resources/beta/assistants.test.ts | 2 +- .../beta/threads/runs/runs.test.ts | 12 ++++++------ .../api-resources/beta/threads/threads.test.ts | 18 +++++++++--------- tests/api-resources/chat/completions.test.ts | 8 ++++---- .../fine-tuning/jobs/jobs.test.ts | 6 +++--- tests/api-resources/images.test.ts | 6 +++--- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index 13fec377d..fdc325254 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -34,7 +34,7 @@ describe('resource assistants', () => { file_search: { vector_store_ids: ['string'], vector_stores: [ - { file_ids: ['string', 'string', 'string'], chunking_strategy: { type: 'auto' }, metadata: {} }, + { chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} }, ], }, }, diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index f6a7dead6..a2fda7757 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -26,8 +26,8 @@ describe('resource runs', () => { additional_instructions: 'additional_instructions', additional_messages: [ { - role: 'user', content: 'string', + role: 'user', attachments: [ { file_id: 'file_id', @@ -57,8 +57,8 @@ describe('resource runs', () => { metadata: {}, }, { - role: 'user', content: 'string', + role: 'user', attachments: [ { file_id: 'file_id', @@ -88,8 +88,8 @@ describe('resource runs', () => { metadata: {}, }, { - role: 'user', content: 'string', + role: 'user', attachments: [ { file_id: 'file_id', @@ -227,9 +227,9 @@ describe('resource runs', 
() => { test('submitToolOutputs: required and optional params', async () => { const response = await client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { tool_outputs: [ - { tool_call_id: 'tool_call_id', output: 'output' }, - { tool_call_id: 'tool_call_id', output: 'output' }, - { tool_call_id: 'tool_call_id', output: 'output' }, + { output: 'output', tool_call_id: 'tool_call_id' }, + { output: 'output', tool_call_id: 'tool_call_id' }, + { output: 'output', tool_call_id: 'tool_call_id' }, ], stream: false, }); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index abf631adb..dc0a94a7d 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -34,8 +34,8 @@ describe('resource threads', () => { { messages: [ { - role: 'user', content: 'string', + role: 'user', attachments: [ { file_id: 'file_id', @@ -65,8 +65,8 @@ describe('resource threads', () => { metadata: {}, }, { - role: 'user', content: 'string', + role: 'user', attachments: [ { file_id: 'file_id', @@ -96,8 +96,8 @@ describe('resource threads', () => { metadata: {}, }, { - role: 'user', content: 'string', + role: 'user', attachments: [ { file_id: 'file_id', @@ -134,8 +134,8 @@ describe('resource threads', () => { vector_store_ids: ['string'], vector_stores: [ { - file_ids: ['string', 'string', 'string'], chunking_strategy: { type: 'auto' }, + file_ids: ['string', 'string', 'string'], metadata: {}, }, ], @@ -220,8 +220,8 @@ describe('resource threads', () => { thread: { messages: [ { - role: 'user', content: 'string', + role: 'user', attachments: [ { file_id: 'file_id', @@ -251,8 +251,8 @@ describe('resource threads', () => { metadata: {}, }, { - role: 'user', content: 'string', + role: 'user', attachments: [ { file_id: 'file_id', @@ -282,8 +282,8 @@ describe('resource threads', () => { metadata: {}, }, { - role: 'user', content: 'string', + role: 'user', attachments: [ { 
file_id: 'file_id', @@ -313,16 +313,16 @@ describe('resource threads', () => { metadata: {}, }, ], + metadata: {}, tool_resources: { code_interpreter: { file_ids: ['string', 'string', 'string'] }, file_search: { vector_store_ids: ['string'], vector_stores: [ - { file_ids: ['string', 'string', 'string'], chunking_strategy: { type: 'auto' }, metadata: {} }, + { chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} }, ], }, }, - metadata: {}, }, tool_choice: 'none', tool_resources: { diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 5cdd1e670..2179c52c3 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -29,7 +29,7 @@ describe('resource completions', () => { model: 'gpt-4o', frequency_penalty: -2, function_call: 'none', - functions: [{ description: 'description', name: 'name', parameters: { foo: 'bar' } }], + functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], logit_bias: { foo: 0 }, logprobs: true, max_tokens: 0, @@ -46,16 +46,16 @@ describe('resource completions', () => { tool_choice: 'none', tools: [ { + function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, type: 'function', - function: { description: 'description', name: 'name', parameters: { foo: 'bar' }, strict: true }, }, { + function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, type: 'function', - function: { description: 'description', name: 'name', parameters: { foo: 'bar' }, strict: true }, }, { + function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, type: 'function', - function: { description: 'description', name: 'name', parameters: { foo: 'bar' }, strict: true }, }, ], top_logprobs: 0, diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts 
b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index e683dfe3e..646c2f5cf 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -33,8 +33,8 @@ describe('resource jobs', () => { type: 'wandb', wandb: { project: 'my-wandb-project', - name: 'name', entity: 'entity', + name: 'name', tags: ['custom-tag', 'custom-tag', 'custom-tag'], }, }, @@ -42,8 +42,8 @@ describe('resource jobs', () => { type: 'wandb', wandb: { project: 'my-wandb-project', - name: 'name', entity: 'entity', + name: 'name', tags: ['custom-tag', 'custom-tag', 'custom-tag'], }, }, @@ -51,8 +51,8 @@ describe('resource jobs', () => { type: 'wandb', wandb: { project: 'my-wandb-project', - name: 'name', entity: 'entity', + name: 'name', tags: ['custom-tag', 'custom-tag', 'custom-tag'], }, }, diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 43e67b030..88eb97a93 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -28,7 +28,7 @@ describe('resource images', () => { model: 'dall-e-2', n: 1, response_format: 'url', - size: '1024x1024', + size: '256x256', user: 'user-1234', }); }); @@ -55,7 +55,7 @@ describe('resource images', () => { model: 'dall-e-2', n: 1, response_format: 'url', - size: '1024x1024', + size: '256x256', user: 'user-1234', }); }); @@ -78,7 +78,7 @@ describe('resource images', () => { n: 1, quality: 'standard', response_format: 'url', - size: '1024x1024', + size: '256x256', style: 'vivid', user: 'user-1234', }); From 06016386f6c8fc270ccbfd6039e18833fb8fa70f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 21:25:22 +0000 Subject: [PATCH 201/533] chore: sync openapi url (#989) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index cad2c64cd..2371b7b8d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ 
configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-97797a9363b9960b5f2fbdc84426a2b91e75533ecd409fe99e37c231180a4339.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-285bce7dcdae7eea5fe84a8d6e5af2c1473d65ea193109370fb2257851eef7eb.yml From d1c40090e7797f3f57f213784996d21a97dc3ec6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 21:25:47 +0000 Subject: [PATCH 202/533] release: 4.55.5 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 04004d7d1..a06275749 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.4" + ".": "4.55.5" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d43fd4772..c61b8138f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.55.5 (2024-08-12) + +Full Changelog: [v4.55.4...v4.55.5](https://github.com/openai/openai-node/compare/v4.55.4...v4.55.5) + +### Chores + +* **examples:** minor formatting changes ([#987](https://github.com/openai/openai-node/issues/987)) ([8e6b615](https://github.com/openai/openai-node/commit/8e6b615ada09fa4e50dc8e0b5decf662eed19856)) +* sync openapi url ([#989](https://github.com/openai/openai-node/issues/989)) ([02ff1c5](https://github.com/openai/openai-node/commit/02ff1c55b5eefd8b6193ba2bf10dd5515945bd7a)) + ## 4.55.4 (2024-08-09) Full Changelog: [v4.55.3...v4.55.4](https://github.com/openai/openai-node/compare/v4.55.3...v4.55.4) diff --git a/README.md b/README.md index 50f7f09e2..116375c15 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 
'https://deno.land/x/openai@v4.55.4/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.5/mod.ts'; ``` diff --git a/package.json b/package.json index 9110c179b..42e3f6eec 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.4", + "version": "4.55.5", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 79fd58ea4..3083cfdab 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.4/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.5/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index f4a031b0b..1d2b59cb4 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.4'; // x-release-please-version +export const VERSION = '4.55.5'; // x-release-please-version From 62f985a351e3fea078226706d6d861a475c8c11e Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 13 Aug 2024 11:26:08 -0400 Subject: [PATCH 203/533] fix(zod-to-json-schema): correct licensing (#986) --- src/_vendor/zod-to-json-schema/LICENSE | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 src/_vendor/zod-to-json-schema/LICENSE diff --git a/src/_vendor/zod-to-json-schema/LICENSE b/src/_vendor/zod-to-json-schema/LICENSE new file mode 100644 index 000000000..a4690a1b6 --- /dev/null +++ b/src/_vendor/zod-to-json-schema/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2020, Stefan Terdell + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. From d486d275d33465ce55047eb77206ec1b7eaaabfd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 13 Aug 2024 15:26:30 +0000 Subject: [PATCH 204/533] release: 4.55.6 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a06275749..3fb120d0e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.5" + ".": "4.55.6" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c61b8138f..e213c9bca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.55.6 (2024-08-13) + +Full Changelog: [v4.55.5...v4.55.6](https://github.com/openai/openai-node/compare/v4.55.5...v4.55.6) + +### Bug Fixes + +* **zod-to-json-schema:** correct licensing ([#986](https://github.com/openai/openai-node/issues/986)) ([bd2051e](https://github.com/openai/openai-node/commit/bd2051e501e2ceafcd095f82205c2e668e1d68d7)) + ## 4.55.5 (2024-08-12) Full Changelog: [v4.55.4...v4.55.5](https://github.com/openai/openai-node/compare/v4.55.4...v4.55.5) diff --git a/README.md b/README.md index 116375c15..193fc24fc 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 
'https://deno.land/x/openai@v4.55.5/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.6/mod.ts'; ``` diff --git a/package.json b/package.json index 42e3f6eec..683ae9afb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.5", + "version": "4.55.6", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 3083cfdab..dd1410d53 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.5/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.6/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 1d2b59cb4..64d6dc952 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.5'; // x-release-please-version +export const VERSION = '4.55.6'; // x-release-please-version From 3c1224d2320f4eae8836e324339fe7794aeabee0 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 13 Aug 2024 16:45:30 -0400 Subject: [PATCH 205/533] fix(json-schema): correct handling of nested recursive schemas (#992) * Fix zod to json schema with nested and recursive objects * minor style updates * add an iteration limit --------- Co-authored-by: Zijia Zhang --- src/_vendor/zod-to-json-schema/Options.ts | 31 +-- .../zod-to-json-schema/zodToJsonSchema.ts | 28 ++- tests/lib/__snapshots__/parser.test.ts.snap | 28 +++ tests/lib/parser.test.ts | 182 +++++++++++++++++- 4 files changed, 242 insertions(+), 27 deletions(-) diff --git a/src/_vendor/zod-to-json-schema/Options.ts b/src/_vendor/zod-to-json-schema/Options.ts index a83690e59..a9abfc0e2 100644 --- a/src/_vendor/zod-to-json-schema/Options.ts +++ b/src/_vendor/zod-to-json-schema/Options.ts @@ 
-38,10 +38,9 @@ export type Options = { openaiStrictMode?: boolean; }; -export const defaultOptions: Options = { +const defaultOptions: Omit = { name: undefined, $refStrategy: 'root', - basePath: ['#'], effectStrategy: 'input', pipeStrategy: 'all', dateStrategy: 'format:date-time', @@ -51,7 +50,6 @@ export const defaultOptions: Options = { definitionPath: 'definitions', target: 'jsonSchema7', strictUnions: false, - definitions: {}, errorMessages: false, markdownDescription: false, patternStrategy: 'escape', @@ -63,13 +61,20 @@ export const defaultOptions: Options = { export const getDefaultOptions = ( options: Partial> | string | undefined, -) => - (typeof options === 'string' ? - { - ...defaultOptions, - name: options, - } - : { - ...defaultOptions, - ...options, - }) as Options; +) => { + // We need to add `definitions` here as we may mutate it + return ( + typeof options === 'string' ? + { + ...defaultOptions, + basePath: ['#'], + definitions: {}, + name: options, + } + : { + ...defaultOptions, + basePath: ['#'], + definitions: {}, + ...options, + }) as Options; +}; diff --git a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts index 1c3290008..e0d63d525 100644 --- a/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts +++ b/src/_vendor/zod-to-json-schema/zodToJsonSchema.ts @@ -49,14 +49,28 @@ const zodToJsonSchema = ( } const definitions: Record = {}; + const processedDefinitions = new Set(); - for (const [name, zodSchema] of Object.entries(refs.definitions)) { - definitions[name] = - parseDef( - zodDef(zodSchema), - { ...refs, currentPath: [...refs.basePath, refs.definitionPath, name] }, - true, - ) ?? 
{}; + // the call to `parseDef()` here might itself add more entries to `.definitions` + // so we need to continually evaluate definitions until we've resolved all of them + // + // we have a generous iteration limit here to avoid blowing up the stack if there + // are any bugs that would otherwise result in us iterating indefinitely + for (let i = 0; i < 500; i++) { + const newDefinitions = Object.entries(refs.definitions).filter( + ([key]) => !processedDefinitions.has(key), + ); + if (newDefinitions.length === 0) break; + + for (const [key, schema] of newDefinitions) { + definitions[key] = + parseDef( + zodDef(schema), + { ...refs, currentPath: [...refs.basePath, refs.definitionPath, key] }, + true, + ) ?? {}; + processedDefinitions.add(key); + } } return definitions; diff --git a/tests/lib/__snapshots__/parser.test.ts.snap b/tests/lib/__snapshots__/parser.test.ts.snap index d98db2345..12e737f5c 100644 --- a/tests/lib/__snapshots__/parser.test.ts.snap +++ b/tests/lib/__snapshots__/parser.test.ts.snap @@ -84,6 +84,34 @@ exports[`.parse() zod nested schema extraction 2`] = ` " `; +exports[`.parse() zod recursive schema extraction 2`] = ` +"{ + "id": "chatcmpl-9vdbw9dekyUSEsSKVQDhTxA2RCxcK", + "object": "chat.completion", + "created": 1723523988, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\\"linked_list\\":{\\"value\\":1,\\"next\\":{\\"value\\":2,\\"next\\":{\\"value\\":3,\\"next\\":{\\"value\\":4,\\"next\\":{\\"value\\":5,\\"next\\":null}}}}}}", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 40, + "completion_tokens": 38, + "total_tokens": 78 + }, + "system_fingerprint": "fp_2a322c9ffc" +} +" +`; + exports[`.parse() zod top-level recursive schemas 1`] = ` "{ "id": "chatcmpl-9uLhw79ArBF4KsQQOlsoE68m6vh6v", diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index 331b16895..cbcc2f186 100644 --- 
a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -525,13 +525,6 @@ describe('.parse()', () => { "$schema": "http://json-schema.org/draft-07/schema#", "additionalProperties": false, "definitions": { - "contactPerson_properties_person1_properties_name": { - "type": "string", - }, - "contactPerson_properties_person1_properties_phone_number": { - "nullable": true, - "type": "string", - }, "query": { "additionalProperties": false, "properties": { @@ -616,6 +609,21 @@ describe('.parse()', () => { }, ], }, + "query_properties_fields_items_anyOf_0_properties_metadata_anyOf_0": { + "additionalProperties": false, + "properties": { + "foo": { + "$ref": "#/definitions/query_properties_fields_items_anyOf_0_properties_metadata_anyOf_0_properties_foo", + }, + }, + "required": [ + "foo", + ], + "type": "object", + }, + "query_properties_fields_items_anyOf_0_properties_metadata_anyOf_0_properties_foo": { + "type": "string", + }, }, "properties": { "fields": { @@ -783,5 +791,165 @@ describe('.parse()', () => { } `); }); + + test('recursive schema extraction', async () => { + const baseLinkedListNodeSchema = z.object({ + value: z.number(), + }); + type LinkedListNode = z.infer & { + next: LinkedListNode | null; + }; + const linkedListNodeSchema: z.ZodType = baseLinkedListNodeSchema.extend({ + next: z.lazy(() => z.union([linkedListNodeSchema, z.null()])), + }); + + // Define the main schema + const mainSchema = z.object({ + linked_list: linkedListNodeSchema, + }); + + expect(zodResponseFormat(mainSchema, 'query').json_schema.schema).toMatchInlineSnapshot(` + { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "definitions": { + "query": { + "additionalProperties": false, + "properties": { + "linked_list": { + "additionalProperties": false, + "properties": { + "next": { + "anyOf": [ + { + "$ref": "#/definitions/query_properties_linked_list", + }, + { + "type": "null", + }, + ], + }, + "value": { + "type": "number", 
+ }, + }, + "required": [ + "value", + "next", + ], + "type": "object", + }, + }, + "required": [ + "linked_list", + ], + "type": "object", + }, + "query_properties_linked_list": { + "additionalProperties": false, + "properties": { + "next": { + "$ref": "#/definitions/query_properties_linked_list_properties_next", + }, + "value": { + "$ref": "#/definitions/query_properties_linked_list_properties_value", + }, + }, + "required": [ + "value", + "next", + ], + "type": "object", + }, + "query_properties_linked_list_properties_next": { + "anyOf": [ + { + "$ref": "#/definitions/query_properties_linked_list", + }, + { + "type": "null", + }, + ], + }, + "query_properties_linked_list_properties_value": { + "type": "number", + }, + }, + "properties": { + "linked_list": { + "additionalProperties": false, + "properties": { + "next": { + "anyOf": [ + { + "$ref": "#/definitions/query_properties_linked_list", + }, + { + "type": "null", + }, + ], + }, + "value": { + "type": "number", + }, + }, + "required": [ + "value", + "next", + ], + "type": "object", + }, + }, + "required": [ + "linked_list", + ], + "type": "object", + } + `); + + const completion = await makeSnapshotRequest( + (openai) => + openai.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'system', + content: + "You are a helpful assistant. 
Generate a data model according to the user's instructions.", + }, + { role: 'user', content: 'create a linklist from 1 to 5' }, + ], + response_format: zodResponseFormat(mainSchema, 'query'), + }), + 2, + ); + + expect(completion.choices[0]?.message).toMatchInlineSnapshot(` + { + "content": "{"linked_list":{"value":1,"next":{"value":2,"next":{"value":3,"next":{"value":4,"next":{"value":5,"next":null}}}}}}", + "parsed": { + "linked_list": { + "next": { + "next": { + "next": { + "next": { + "next": null, + "value": 5, + }, + "value": 4, + }, + "value": 3, + }, + "value": 2, + }, + "value": 1, + }, + }, + "refusal": null, + "role": "assistant", + "tool_calls": [], + } + `); + }); }); }); From 19a5ba3b09299375b50b49d43a13bd89214b8575 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 13 Aug 2024 20:45:51 +0000 Subject: [PATCH 206/533] release: 4.55.7 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3fb120d0e..3156504f7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.6" + ".": "4.55.7" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e213c9bca..c9aa0a024 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.55.7 (2024-08-13) + +Full Changelog: [v4.55.6...v4.55.7](https://github.com/openai/openai-node/compare/v4.55.6...v4.55.7) + +### Bug Fixes + +* **json-schema:** correct handling of nested recursive schemas ([#992](https://github.com/openai/openai-node/issues/992)) ([ac309ab](https://github.com/openai/openai-node/commit/ac309abee3419594f45680c7d0ab11e13ce28c5b)) + ## 4.55.6 (2024-08-13) Full Changelog: 
[v4.55.5...v4.55.6](https://github.com/openai/openai-node/compare/v4.55.5...v4.55.6) diff --git a/README.md b/README.md index 193fc24fc..0d6a9b6ad 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.55.6/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.7/mod.ts'; ``` diff --git a/package.json b/package.json index 683ae9afb..b746ad36c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.6", + "version": "4.55.7", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index dd1410d53..296fad532 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.6/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.7/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 64d6dc952..2e8b039e5 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.6'; // x-release-please-version +export const VERSION = '4.55.7'; // x-release-please-version From 589ec49f305ecc562ffdb254eff199baf6a06461 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 21:58:43 +0000 Subject: [PATCH 207/533] chore(types): define FilePurpose enum (#997) --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 1 + src/resources/files.ts | 15 ++++++++++++++- src/resources/index.ts | 1 + src/resources/uploads/uploads.ts | 2 +- 6 files changed, 19 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2371b7b8d..185585b67 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ 
configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-285bce7dcdae7eea5fe84a8d6e5af2c1473d65ea193109370fb2257851eef7eb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ff62fa1091460d68fbd36d72c17d91b709917bebf2983c9c4de5784bc384a2e.yml diff --git a/api.md b/api.md index 25f08b130..9594a101c 100644 --- a/api.md +++ b/api.md @@ -76,6 +76,7 @@ Types: - FileContent - FileDeleted - FileObject +- FilePurpose Methods: diff --git a/src/index.ts b/src/index.ts index 5f7dffd67..97ffb596d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -277,6 +277,7 @@ export namespace OpenAI { export import FileContent = API.FileContent; export import FileDeleted = API.FileDeleted; export import FileObject = API.FileObject; + export import FilePurpose = API.FilePurpose; export import FileObjectsPage = API.FileObjectsPage; export import FileCreateParams = API.FileCreateParams; export import FileListParams = API.FileListParams; diff --git a/src/resources/files.ts b/src/resources/files.ts index a2d3aaa44..ba01a9041 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -183,6 +183,18 @@ export interface FileObject { status_details?: string; } +/** + * The intended purpose of the uploaded file. + * + * Use "assistants" for + * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and + * [Message](https://platform.openai.com/docs/api-reference/messages) files, + * "vision" for Assistants image file inputs, "batch" for + * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for + * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + */ +export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision'; + export interface FileCreateParams { /** * The File object (not file name) to be uploaded. 
@@ -199,7 +211,7 @@ export interface FileCreateParams { * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). */ - purpose: 'assistants' | 'batch' | 'fine-tune' | 'vision'; + purpose: FilePurpose; } export interface FileListParams { @@ -213,6 +225,7 @@ export namespace Files { export import FileContent = FilesAPI.FileContent; export import FileDeleted = FilesAPI.FileDeleted; export import FileObject = FilesAPI.FileObject; + export import FilePurpose = FilesAPI.FilePurpose; export import FileObjectsPage = FilesAPI.FileObjectsPage; export import FileCreateParams = FilesAPI.FileCreateParams; export import FileListParams = FilesAPI.FileListParams; diff --git a/src/resources/index.ts b/src/resources/index.ts index 8d952e2db..a78808584 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -27,6 +27,7 @@ export { FileContent, FileDeleted, FileObject, + FilePurpose, FileCreateParams, FileListParams, FileObjectsPage, diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index ceb2b6d23..1c3ed708d 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -143,7 +143,7 @@ export interface UploadCreateParams { * See the * [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). 
*/ - purpose: 'assistants' | 'batch' | 'fine-tune' | 'vision'; + purpose: FilesAPI.FilePurpose; } export interface UploadCompleteParams { From 0fc2ae418095f1e84e8bf8de5f6f16d2d00dfc83 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 21:59:08 +0000 Subject: [PATCH 208/533] release: 4.55.8 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3156504f7..50eb779fe 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.7" + ".": "4.55.8" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c9aa0a024..d55b31275 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.55.8 (2024-08-15) + +Full Changelog: [v4.55.7...v4.55.8](https://github.com/openai/openai-node/compare/v4.55.7...v4.55.8) + +### Chores + +* **types:** define FilePurpose enum ([#997](https://github.com/openai/openai-node/issues/997)) ([19b941b](https://github.com/openai/openai-node/commit/19b941be4ff3e4fa7e67b820a5aac51e5c8d4f60)) + ## 4.55.7 (2024-08-13) Full Changelog: [v4.55.6...v4.55.7](https://github.com/openai/openai-node/compare/v4.55.6...v4.55.7) diff --git a/README.md b/README.md index 0d6a9b6ad..03364dbe2 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.55.7/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.8/mod.ts'; ``` diff --git a/package.json b/package.json index b746ad36c..bf193dcae 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.7", + "version": "4.55.8", "description": "The official TypeScript library for the OpenAI API", "author": 
"OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 296fad532..e31a7f76e 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.7/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.8/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 2e8b039e5..f2078f00d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.7'; // x-release-please-version +export const VERSION = '4.55.8'; // x-release-please-version From 0121251f6e68dcc569061ed4a444ace2ffeb8c77 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Thu, 15 Aug 2024 18:00:45 -0700 Subject: [PATCH 209/533] fix(azure/tts): avoid stripping model param (#999) * [Azure] Fix tts required model issue * add tests * remove console.log call --- src/index.ts | 1 - tests/lib/azure.test.ts | 332 ++++++++++++++++++---------------------- 2 files changed, 148 insertions(+), 185 deletions(-) diff --git a/src/index.ts b/src/index.ts index 97ffb596d..422e26c92 100644 --- a/src/index.ts +++ b/src/index.ts @@ -460,7 +460,6 @@ export class AzureOpenAI extends OpenAI { throw new Error('Expected request body to be an object'); } const model = this._deployment || options.body['model']; - delete options.body['model']; if (model !== undefined && !this.baseURL.includes('/deployments')) { options.path = `/deployments/${model}${options.path}`; } diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index 06ca1d464..6bb6e0d1e 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -278,8 +278,10 @@ describe('azure request building', () => { const client = new AzureOpenAI({ baseURL: 'https://example.com/', apiKey: 'My API Key', apiVersion }); describe('model to deployment mapping', 
function () { - const testFetch = async (url: RequestInfo): Promise => { - return new Response(JSON.stringify({ url }), { headers: { 'content-type': 'application/json' } }); + const testFetch = async (url: RequestInfo, { body }: RequestInit = {}): Promise => { + return new Response(JSON.stringify({ url, body }), { + headers: { 'content-type': 'application/json' }, + }); }; describe('with client-level deployment', function () { const client = new AzureOpenAI({ @@ -291,127 +293,109 @@ describe('azure request building', () => { }); test('handles batch', async () => { - expect( - await client.batches.create({ - completion_window: '24h', - endpoint: '/v1/chat/completions', - input_file_id: 'file-id', - }), - ).toStrictEqual({ - url: `https://example.com/openai/batches?api-version=${apiVersion}`, - }); + const { url } = (await client.batches.create({ + completion_window: '24h', + endpoint: '/v1/chat/completions', + input_file_id: 'file-id', + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/batches?api-version=${apiVersion}`); }); test('handles completions', async () => { - expect( - await client.completions.create({ - model, - prompt: 'prompt', - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/completions?api-version=${apiVersion}`, - }); + const { url } = (await client.completions.create({ + model, + prompt: 'prompt', + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/completions?api-version=${apiVersion}`, + ); }); test('handles chat completions', async () => { - expect( - await client.chat.completions.create({ - model, - messages: [{ role: 'system', content: 'Hello' }], - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`, - }); + const { url } = (await client.chat.completions.create({ + model, + messages: [{ role: 'system', content: 'Hello' }], + })) as any; + expect(url).toStrictEqual( 
+ `https://example.com/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`, + ); }); test('handles embeddings', async () => { - expect( - await client.embeddings.create({ - model, - input: 'input', - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/embeddings?api-version=${apiVersion}`, - }); + const { url } = (await client.embeddings.create({ + model, + input: 'input', + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/embeddings?api-version=${apiVersion}`, + ); }); test('handles audio translations', async () => { - expect( - await client.audio.translations.create({ - model, - file: { url: 'https://example.com/', blob: () => 0 as any }, - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/audio/translations?api-version=${apiVersion}`, - }); + const { url } = (await client.audio.translations.create({ + model, + file: { url: 'https://example.com/', blob: () => 0 as any }, + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/audio/translations?api-version=${apiVersion}`, + ); }); test('handles audio transcriptions', async () => { - expect( - await client.audio.transcriptions.create({ - model, - file: { url: 'https://example.com/', blob: () => 0 as any }, - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/audio/transcriptions?api-version=${apiVersion}`, - }); + const { url } = (await client.audio.transcriptions.create({ + model, + file: { url: 'https://example.com/', blob: () => 0 as any }, + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/audio/transcriptions?api-version=${apiVersion}`, + ); }); test('handles text to speech', async () => { - expect( - await ( - await client.audio.speech.create({ - model, - input: '', - voice: 'alloy', - }) - ).json(), - 
).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/audio/speech?api-version=${apiVersion}`, - }); + const { url, body } = await ( + await client.audio.speech.create({ + model, + input: '', + voice: 'alloy', + }) + ).json(); + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/audio/speech?api-version=${apiVersion}`, + ); + expect(body).toMatch(new RegExp(`"model": "${model}"`)); }); test('handles image generation', async () => { - expect( - await client.images.generate({ - model, - prompt: 'prompt', - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/images/generations?api-version=${apiVersion}`, - }); + const { url } = (await client.images.generate({ + model, + prompt: 'prompt', + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/images/generations?api-version=${apiVersion}`, + ); }); test('handles assistants', async () => { - expect( - await client.beta.assistants.create({ - model, - }), - ).toStrictEqual({ - url: `https://example.com/openai/assistants?api-version=${apiVersion}`, - }); + const { url } = (await client.beta.assistants.create({ + model, + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/assistants?api-version=${apiVersion}`); }); test('handles files', async () => { - expect( - await client.files.create({ - file: { url: 'https://example.com/', blob: () => 0 as any }, - purpose: 'assistants', - }), - ).toStrictEqual({ - url: `https://example.com/openai/files?api-version=${apiVersion}`, - }); + const { url } = (await client.files.create({ + file: { url: 'https://example.com/', blob: () => 0 as any }, + purpose: 'assistants', + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/files?api-version=${apiVersion}`); }); test('handles fine tuning', async () => { - expect( - await client.fineTuning.jobs.create({ - model, - training_file: '', - }), - ).toStrictEqual({ 
- url: `https://example.com/openai/fine_tuning/jobs?api-version=${apiVersion}`, - }); + const { url } = (await client.fineTuning.jobs.create({ + model, + training_file: '', + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/fine_tuning/jobs?api-version=${apiVersion}`); }); }); @@ -424,127 +408,107 @@ describe('azure request building', () => { }); test('handles batch', async () => { - expect( - await client.batches.create({ - completion_window: '24h', - endpoint: '/v1/chat/completions', - input_file_id: 'file-id', - }), - ).toStrictEqual({ - url: `https://example.com/openai/batches?api-version=${apiVersion}`, - }); + const { url } = (await client.batches.create({ + completion_window: '24h', + endpoint: '/v1/chat/completions', + input_file_id: 'file-id', + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/batches?api-version=${apiVersion}`); }); test('handles completions', async () => { - expect( - await client.completions.create({ - model: deployment, - prompt: 'prompt', - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/completions?api-version=${apiVersion}`, - }); + const { url } = (await client.completions.create({ + model: deployment, + prompt: 'prompt', + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/completions?api-version=${apiVersion}`, + ); }); test('handles chat completions', async () => { - expect( - await client.chat.completions.create({ - model: deployment, - messages: [{ role: 'system', content: 'Hello' }], - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`, - }); + const { url } = (await client.chat.completions.create({ + model: deployment, + messages: [{ role: 'system', content: 'Hello' }], + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`, + ); }); 
test('handles embeddings', async () => { - expect( - await client.embeddings.create({ - model: deployment, - input: 'input', - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/embeddings?api-version=${apiVersion}`, - }); + const { url } = (await client.embeddings.create({ + model: deployment, + input: 'input', + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/embeddings?api-version=${apiVersion}`, + ); }); test('Audio translations is not handled', async () => { - expect( - await client.audio.translations.create({ - model: deployment, - file: { url: 'https://example.com/', blob: () => 0 as any }, - }), - ).toStrictEqual({ - url: `https://example.com/openai/audio/translations?api-version=${apiVersion}`, - }); + const { url } = (await client.audio.translations.create({ + model: deployment, + file: { url: 'https://example.com/', blob: () => 0 as any }, + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/audio/translations?api-version=${apiVersion}`); }); test('Audio transcriptions is not handled', async () => { - expect( - await client.audio.transcriptions.create({ - model: deployment, - file: { url: 'https://example.com/', blob: () => 0 as any }, - }), - ).toStrictEqual({ - url: `https://example.com/openai/audio/transcriptions?api-version=${apiVersion}`, - }); + const { url } = (await client.audio.transcriptions.create({ + model: deployment, + file: { url: 'https://example.com/', blob: () => 0 as any }, + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/audio/transcriptions?api-version=${apiVersion}`, + ); }); test('handles text to speech', async () => { - expect( - await ( - await client.audio.speech.create({ - model: deployment, - input: '', - voice: 'alloy', - }) - ).json(), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/audio/speech?api-version=${apiVersion}`, - }); + 
const { url, body } = await ( + await client.audio.speech.create({ + model: deployment, + input: '', + voice: 'alloy', + }) + ).json(); + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/audio/speech?api-version=${apiVersion}`, + ); + expect(body).toMatch(new RegExp(`"model": "${deployment}"`)); }); test('handles image generation', async () => { - expect( - await client.images.generate({ - model: deployment, - prompt: 'prompt', - }), - ).toStrictEqual({ - url: `https://example.com/openai/deployments/${deployment}/images/generations?api-version=${apiVersion}`, - }); + const { url } = (await client.images.generate({ + model: deployment, + prompt: 'prompt', + })) as any; + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/images/generations?api-version=${apiVersion}`, + ); }); test('handles assistants', async () => { - expect( - await client.beta.assistants.create({ - model, - }), - ).toStrictEqual({ - url: `https://example.com/openai/assistants?api-version=${apiVersion}`, - }); + const { url } = (await client.beta.assistants.create({ + model, + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/assistants?api-version=${apiVersion}`); }); test('handles files', async () => { - expect( - await client.files.create({ - file: { url: 'https://example.com/', blob: () => 0 as any }, - purpose: 'assistants', - }), - ).toStrictEqual({ - url: `https://example.com/openai/files?api-version=${apiVersion}`, - }); + const { url } = (await client.files.create({ + file: { url: 'https://example.com/', blob: () => 0 as any }, + purpose: 'assistants', + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/files?api-version=${apiVersion}`); }); test('handles fine tuning', async () => { - expect( - await client.fineTuning.jobs.create({ - model, - training_file: '', - }), - ).toStrictEqual({ - url: `https://example.com/openai/fine_tuning/jobs?api-version=${apiVersion}`, - }); 
+ const { url } = (await client.fineTuning.jobs.create({ + model: deployment, + training_file: '', + })) as any; + expect(url).toStrictEqual(`https://example.com/openai/fine_tuning/jobs?api-version=${apiVersion}`); }); }); }); From a16873d9011c6aef7a58ba6b1beb69989377abb4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 Aug 2024 01:01:02 +0000 Subject: [PATCH 210/533] release: 4.55.9 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 50eb779fe..174efd398 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.8" + ".": "4.55.9" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d55b31275..2bdf09fe1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.55.9 (2024-08-16) + +Full Changelog: [v4.55.8...v4.55.9](https://github.com/openai/openai-node/compare/v4.55.8...v4.55.9) + +### Bug Fixes + +* **azure/tts:** avoid stripping model param ([#999](https://github.com/openai/openai-node/issues/999)) ([c3a7ccd](https://github.com/openai/openai-node/commit/c3a7ccdbd6d9a2576509c2dc6c1605bc73c6dde7)) + ## 4.55.8 (2024-08-15) Full Changelog: [v4.55.7...v4.55.8](https://github.com/openai/openai-node/compare/v4.55.7...v4.55.8) diff --git a/README.md b/README.md index 03364dbe2..c322073f5 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.55.8/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.55.9/mod.ts'; ``` diff --git a/package.json b/package.json index bf193dcae..4f5c0cbd4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.8", + 
"version": "4.55.9", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index e31a7f76e..317f45e7d 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.8/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.55.9/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index f2078f00d..61170c3ee 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.8'; // x-release-please-version +export const VERSION = '4.55.9'; // x-release-please-version From f94bef28ac5b7469a8a67ec2f44a3e38e0cbcac2 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 16 Aug 2024 13:40:30 +0000 Subject: [PATCH 211/533] feat(api): add chatgpt-4o-latest model --- src/resources/chat/completions.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 26e7490e0..764bdb129 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -475,7 +475,7 @@ export interface ChatCompletionMessage { * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of * a function that should be called, as generated by the model. */ - function_call?: ChatCompletionMessage.FunctionCall; + function_call?: ChatCompletionMessage.FunctionCall | null; /** * The tool calls generated by the model, such as function calls. 
From 39731a60b99053d41b1943c85e115c2f23ccb2b9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 Aug 2024 13:40:51 +0000 Subject: [PATCH 212/533] release: 4.56.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 174efd398..4cce37624 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.55.9" + ".": "4.56.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bdf09fe1..b3447cc50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.56.0 (2024-08-16) + +Full Changelog: [v4.55.9...v4.56.0](https://github.com/openai/openai-node/compare/v4.55.9...v4.56.0) + +### Features + +* **api:** add chatgpt-4o-latest model ([edc4398](https://github.com/openai/openai-node/commit/edc43986ba96a0fda48f7eea368efe706f68dcac)) + ## 4.55.9 (2024-08-16) Full Changelog: [v4.55.8...v4.55.9](https://github.com/openai/openai-node/compare/v4.55.8...v4.55.9) diff --git a/README.md b/README.md index c322073f5..04fe6470c 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.55.9/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.56.0/mod.ts'; ``` diff --git a/package.json b/package.json index 4f5c0cbd4..bf3ec6303 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.55.9", + "version": "4.56.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 317f45e7d..6a1fd236b 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This 
is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.55.9/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.56.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 61170c3ee..03692402f 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.55.9'; // x-release-please-version +export const VERSION = '4.56.0'; // x-release-please-version From 5236bbc16fe4f2f93ee3398299c1d875e60fde64 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Tue, 20 Aug 2024 15:46:03 -0700 Subject: [PATCH 213/533] [Azure] Refresh AAD token on retry (#1003) * [Azure] Refresh AAD token on retry * add context --- src/index.ts | 8 +++++++- tests/lib/azure.test.ts | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index 422e26c92..0c6c7badb 100644 --- a/src/index.ts +++ b/src/index.ts @@ -485,7 +485,13 @@ export class AzureOpenAI extends OpenAI { } protected override async prepareOptions(opts: Core.FinalRequestOptions): Promise { - if (opts.headers?.['Authorization'] || opts.headers?.['api-key']) { + /** + * The user should provide a bearer token provider if they want + * to use Azure AD authentication. The user shouldn't set the + * Authorization header manually because the header is overwritten + * with the Azure AD token if a bearer token provider is provided. 
+ */ + if (opts.headers?.['api-key']) { return super.prepareOptions(opts); } const token = await this._getAzureADToken(); diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index 6bb6e0d1e..064a0098c 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -254,6 +254,43 @@ describe('instantiate azure client', () => { /The `apiKey` and `azureADTokenProvider` arguments are mutually exclusive; only one can be passed at a time./, ); }); + + test('AAD token is refreshed', async () => { + let fail = true; + const testFetch = async (url: RequestInfo, req: RequestInit | undefined): Promise => { + if (fail) { + fail = false; + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After': '0.1', + }, + }); + } + return new Response( + JSON.stringify({ auth: (req?.headers as Record)['authorization'] }), + { headers: { 'content-type': 'application/json' } }, + ); + }; + let counter = 0; + async function azureADTokenProvider() { + return `token-${counter++}`; + } + const client = new AzureOpenAI({ + baseURL: 'http://localhost:5000/', + azureADTokenProvider, + apiVersion, + fetch: testFetch, + }); + expect( + await client.chat.completions.create({ + model, + messages: [{ role: 'system', content: 'Hello' }], + }), + ).toStrictEqual({ + auth: 'Bearer token-1', + }); + }); }); test('with endpoint', () => { From 52b7cfe3572305671e70d47257cb224233ad79f4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 17:51:17 +0000 Subject: [PATCH 214/533] chore(ci): check for build errors (#1013) --- .github/workflows/ci.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 68f80399b..333139a53 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,6 +27,25 @@ jobs: - name: Check types run: ./scripts/lint + + build: + name: build + runs-on: ubuntu-latest + if: 
github.repository == 'openai/openai-node' + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '18' + + - name: Install dependencies + run: yarn install + + - name: Check build + run: ./scripts/build test: name: test runs-on: ubuntu-latest From 82439072952ebac9fdc82f895063c7996676b1e1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 17:51:42 +0000 Subject: [PATCH 215/533] release: 4.56.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4cce37624..a1aa971a9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.56.0" + ".": "4.56.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b3447cc50..9ce651a98 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.56.1 (2024-08-27) + +Full Changelog: [v4.56.0...v4.56.1](https://github.com/openai/openai-node/compare/v4.56.0...v4.56.1) + +### Chores + +* **ci:** check for build errors ([#1013](https://github.com/openai/openai-node/issues/1013)) ([7ff2127](https://github.com/openai/openai-node/commit/7ff21273091a605e05173502654cfb9c90a4382e)) + ## 4.56.0 (2024-08-16) Full Changelog: [v4.55.9...v4.56.0](https://github.com/openai/openai-node/compare/v4.55.9...v4.56.0) diff --git a/README.md b/README.md index 04fe6470c..5bde96318 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.56.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.56.1/mod.ts'; ``` diff --git a/package.json b/package.json index bf3ec6303..d02286c59 100644 --- a/package.json +++ b/package.json 
@@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.56.0", + "version": "4.56.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 6a1fd236b..90c555c85 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.56.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.56.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 03692402f..893e19f6a 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.56.0'; // x-release-please-version +export const VERSION = '4.56.1'; // x-release-please-version From 3f12b9243b2534195502f95e588619413875675a Mon Sep 17 00:00:00 2001 From: Darren McElligott <1863428+darrenmce@users.noreply.github.com> Date: Wed, 28 Aug 2024 15:49:57 -0400 Subject: [PATCH 216/533] helpers/zod: fix type on props parameter and add test (#1018) --- src/helpers/zod.ts | 2 +- tests/helpers/zod.test.ts | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts index 1946b2199..463ef588c 100644 --- a/src/helpers/zod.ts +++ b/src/helpers/zod.ts @@ -58,7 +58,7 @@ function zodToJsonSchema(schema: z.ZodType, options: { name: string }): Record( zodObject: ZodInput, name: string, - props?: Omit, + props?: Omit, ): AutoParseableResponseFormat> { return makeParseableResponseFormat( { diff --git a/tests/helpers/zod.test.ts b/tests/helpers/zod.test.ts index 1ad4b7475..493b4c0c8 100644 --- a/tests/helpers/zod.test.ts +++ b/tests/helpers/zod.test.ts @@ -132,6 +132,18 @@ describe('zodResponseFormat', () => { `); }); + it('allows description field to be passed in', () => { + expect( + zodResponseFormat( + z.object({ + city: 
z.string(), + }), + 'city', + { description: 'A city' }, + ).json_schema, + ).toHaveProperty('description', 'A city'); + }); + test('kitchen sink types', () => { const Table = z.enum(['orders', 'customers', 'products']); From eb50fc46dd1e70f9dac831ad82379ac6787cffe7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 29 Aug 2024 12:47:29 +0000 Subject: [PATCH 217/533] chore: run tsc as part of lint script (#1020) --- scripts/lint | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/lint b/scripts/lint index 4af1de013..6ba75dfb5 100755 --- a/scripts/lint +++ b/scripts/lint @@ -6,3 +6,6 @@ cd "$(dirname "$0")/.." echo "==> Running eslint" ESLINT_USE_FLAT_CONFIG="false" ./node_modules/.bin/eslint --ext ts,js . + +echo "==> Running tsc" +./node_modules/.bin/tsc --noEmit From 081e21acf9f49c21b0dffece08e7c61b2d90cc6e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 29 Aug 2024 12:47:54 +0000 Subject: [PATCH 218/533] release: 4.56.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a1aa971a9..780c9d947 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.56.1" + ".": "4.56.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ce651a98..b6420ffe3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.56.2 (2024-08-29) + +Full Changelog: [v4.56.1...v4.56.2](https://github.com/openai/openai-node/compare/v4.56.1...v4.56.2) + +### Chores + +* run tsc as part of lint script ([#1020](https://github.com/openai/openai-node/issues/1020)) ([4942347](https://github.com/openai/openai-node/commit/49423472f2b0a0b63961174bedfc00bfd99d47f9)) 
+ ## 4.56.1 (2024-08-27) Full Changelog: [v4.56.0...v4.56.1](https://github.com/openai/openai-node/compare/v4.56.0...v4.56.1) diff --git a/README.md b/README.md index 5bde96318..80a266fb2 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.56.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.56.2/mod.ts'; ``` diff --git a/package.json b/package.json index d02286c59..1fa8e59d4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.56.1", + "version": "4.56.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 90c555c85..14f03b5e7 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.56.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.56.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 893e19f6a..8d5cfea61 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.56.1'; // x-release-please-version +export const VERSION = '4.56.2'; // x-release-please-version From 619903cbc75df18ab6563bffc164b880970cf2c9 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 29 Aug 2024 16:14:13 +0000 Subject: [PATCH 219/533] feat(api): add file search result details to run steps (#1023) --- .stats.yml | 2 +- api.md | 3 +- package.json | 4 +- src/index.ts | 7 + src/resources/beta/assistants.ts | 82 +++++++---- src/resources/beta/threads/runs/index.ts | 2 + src/resources/beta/threads/runs/runs.ts | 133 ++++++++++-------- src/resources/beta/threads/runs/steps.ts | 124 +++++++++++++++- .../beta/threads/runs/runs.test.ts | 1 + 
.../beta/threads/runs/steps.test.ts | 21 ++- tests/stringifyQuery.test.ts | 6 - yarn.lock | 106 ++++++++++++++ 12 files changed, 400 insertions(+), 91 deletions(-) diff --git a/.stats.yml b/.stats.yml index 185585b67..fd4f27136 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8ff62fa1091460d68fbd36d72c17d91b709917bebf2983c9c4de5784bc384a2e.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-1dbac0e95bdb5a89a0dd3d93265475a378214551b7d8c22862928e0d87ace94b.yml diff --git a/api.md b/api.md index 9594a101c..936f64196 100644 --- a/api.md +++ b/api.md @@ -339,6 +339,7 @@ Types: - RunStepDelta - RunStepDeltaEvent - RunStepDeltaMessageDelta +- RunStepInclude - ToolCall - ToolCallDelta - ToolCallDeltaObject @@ -346,7 +347,7 @@ Types: Methods: -- client.beta.threads.runs.steps.retrieve(threadId, runId, stepId) -> RunStep +- client.beta.threads.runs.steps.retrieve(threadId, runId, stepId, { ...params }) -> RunStep - client.beta.threads.runs.steps.list(threadId, runId, { ...params }) -> RunStepsPage ### Messages diff --git a/package.json b/package.json index 1fa8e59d4..e4d595eae 100644 --- a/package.json +++ b/package.json @@ -26,11 +26,13 @@ "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", + "@types/qs": "^6.9.7", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7" + "node-fetch": "^2.6.7", + "qs": "^6.10.3" }, "devDependencies": { "@swc/core": "^1.3.102", diff --git a/src/index.ts b/src/index.ts index 0c6c7badb..c0e527d25 100644 --- a/src/index.ts +++ b/src/index.ts @@ -2,7 +2,10 @@ import * as Errors from './error'; import * as Uploads from './uploads'; + import { type Agent, type RequestInit } from './_shims/index'; +import * as qs from 'qs'; + import * as Core from './core'; import * as Pagination from 
'./pagination'; import * as API from './resources/index'; @@ -183,6 +186,10 @@ export class OpenAI extends Core.APIClient { return { Authorization: `Bearer ${this.apiKey}` }; } + protected override stringifyQuery(query: Record): string { + return qs.stringify(query, { arrayFormat: 'brackets' }); + } + static OpenAI = this; static DEFAULT_TIMEOUT = 600000; // 10 minutes diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 8d07e45b0..924d63d5c 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -441,8 +441,8 @@ export namespace AssistantStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is - * created. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * is created. */ export interface ThreadRunStepCreated { /** @@ -455,7 +455,7 @@ export namespace AssistantStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) * moves to an `in_progress` state. */ export interface ThreadRunStepInProgress { @@ -469,8 +469,8 @@ export namespace AssistantStreamEvent { /** * Occurs when parts of a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) are - * being streamed. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * are being streamed. */ export interface ThreadRunStepDelta { /** @@ -484,8 +484,8 @@ export namespace AssistantStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is - * completed. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * is completed. 
*/ export interface ThreadRunStepCompleted { /** @@ -498,7 +498,7 @@ export namespace AssistantStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) * fails. */ export interface ThreadRunStepFailed { @@ -512,8 +512,8 @@ export namespace AssistantStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is - * cancelled. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * is cancelled. */ export interface ThreadRunStepCancelled { /** @@ -526,7 +526,7 @@ export namespace AssistantStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) * expires. */ export interface ThreadRunStepExpired { @@ -658,10 +658,42 @@ export namespace FileSearchTool { * * Note that the file search tool may output fewer than `max_num_results` results. * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) * for more information. */ max_num_results?: number; + + /** + * The ranking options for the file search. + * + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * for more information. + */ + ranking_options?: FileSearch.RankingOptions; + } + + export namespace FileSearch { + /** + * The ranking options for the file search. + * + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * for more information. 
+ */ + export interface RankingOptions { + /** + * The ranker to use for the file search. If not specified will use the `auto` + * ranker. + */ + ranker?: 'auto' | 'default_2024_08_21'; + + /** + * The score threshold for the file search. All values must be a floating point + * number between 0 and 1. + */ + score_threshold?: number; + } } } @@ -765,8 +797,8 @@ export namespace MessageStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is - * created. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * is created. */ export type RunStepStreamEvent = | RunStepStreamEvent.ThreadRunStepCreated @@ -780,8 +812,8 @@ export type RunStepStreamEvent = export namespace RunStepStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is - * created. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * is created. */ export interface ThreadRunStepCreated { /** @@ -794,7 +826,7 @@ export namespace RunStepStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) * moves to an `in_progress` state. */ export interface ThreadRunStepInProgress { @@ -808,8 +840,8 @@ export namespace RunStepStreamEvent { /** * Occurs when parts of a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) are - * being streamed. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * are being streamed. */ export interface ThreadRunStepDelta { /** @@ -823,8 +855,8 @@ export namespace RunStepStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is - * completed. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * is completed. 
*/ export interface ThreadRunStepCompleted { /** @@ -837,7 +869,7 @@ export namespace RunStepStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) * fails. */ export interface ThreadRunStepFailed { @@ -851,8 +883,8 @@ export namespace RunStepStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is - * cancelled. + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + * is cancelled. */ export interface ThreadRunStepCancelled { /** @@ -865,7 +897,7 @@ export namespace RunStepStreamEvent { /** * Occurs when a - * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) + * [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) * expires. */ export interface ThreadRunStepExpired { diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index d216195cb..9496f59e1 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -14,10 +14,12 @@ export { RunStepDelta, RunStepDeltaEvent, RunStepDeltaMessageDelta, + RunStepInclude, ToolCall, ToolCallDelta, ToolCallDeltaObject, ToolCallsStepDetails, + StepRetrieveParams, StepListParams, RunStepsPage, Steps, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 9383e70cc..fe3a278e9 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -22,27 +22,33 @@ export class Runs extends APIResource { /** * Create a run. 
*/ - create(threadId: string, body: RunCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; create( threadId: string, - body: RunCreateParamsStreaming, + params: RunCreateParamsNonStreaming, + options?: Core.RequestOptions, + ): APIPromise; + create( + threadId: string, + params: RunCreateParamsStreaming, options?: Core.RequestOptions, ): APIPromise>; create( threadId: string, - body: RunCreateParamsBase, + params: RunCreateParamsBase, options?: Core.RequestOptions, ): APIPromise | Run>; create( threadId: string, - body: RunCreateParams, + params: RunCreateParams, options?: Core.RequestOptions, ): APIPromise | APIPromise> { + const { include, ...body } = params; return this._client.post(`/threads/${threadId}/runs`, { + query: { include }, body, ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, - stream: body.stream ?? false, + stream: params.stream ?? false, }) as APIPromise | APIPromise>; } @@ -617,74 +623,87 @@ export type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStrea export interface RunCreateParamsBase { /** - * The ID of the + * Body param: The ID of the * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to * execute this run. */ assistant_id: string; /** - * Appends additional instructions at the end of the instructions for the run. This - * is useful for modifying the behavior on a per-run basis without overriding other - * instructions. + * Query param: A list of additional fields to include in the response. Currently + * the only supported value is + * `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + * search result content. + * + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * for more information. + */ + include?: Array; + + /** + * Body param: Appends additional instructions at the end of the instructions for + * the run. 
This is useful for modifying the behavior on a per-run basis without + * overriding other instructions. */ additional_instructions?: string | null; /** - * Adds additional messages to the thread before creating the run. + * Body param: Adds additional messages to the thread before creating the run. */ additional_messages?: Array | null; /** - * Overrides the + * Body param: Overrides the * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) * of the assistant. This is useful for modifying the behavior on a per-run basis. */ instructions?: string | null; /** - * The maximum number of completion tokens that may be used over the course of the - * run. The run will make a best effort to use only the number of completion tokens - * specified, across multiple turns of the run. If the run exceeds the number of - * completion tokens specified, the run will end with status `incomplete`. See - * `incomplete_details` for more info. + * Body param: The maximum number of completion tokens that may be used over the + * course of the run. The run will make a best effort to use only the number of + * completion tokens specified, across multiple turns of the run. If the run + * exceeds the number of completion tokens specified, the run will end with status + * `incomplete`. See `incomplete_details` for more info. */ max_completion_tokens?: number | null; /** - * The maximum number of prompt tokens that may be used over the course of the run. - * The run will make a best effort to use only the number of prompt tokens - * specified, across multiple turns of the run. If the run exceeds the number of - * prompt tokens specified, the run will end with status `incomplete`. See - * `incomplete_details` for more info. + * Body param: The maximum number of prompt tokens that may be used over the course + * of the run. The run will make a best effort to use only the number of prompt + * tokens specified, across multiple turns of the run. 
If the run exceeds the + * number of prompt tokens specified, the run will end with status `incomplete`. + * See `incomplete_details` for more info. */ max_prompt_tokens?: number | null; /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * Body param: Set of 16 key-value pairs that can be attached to an object. This + * can be useful for storing additional information about the object in a + * structured format. Keys can be a maximum of 64 characters long and values can be + * a maxium of 512 characters long. */ metadata?: unknown | null; /** - * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to - * be used to execute this run. If a value is provided here, it will override the - * model associated with the assistant. If not, the model associated with the - * assistant will be used. + * Body param: The ID of the + * [Model](https://platform.openai.com/docs/api-reference/models) to be used to + * execute this run. If a value is provided here, it will override the model + * associated with the assistant. If not, the model associated with the assistant + * will be used. */ model?: (string & {}) | ChatAPI.ChatModel | null; /** - * Whether to enable + * Body param: Whether to enable * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; /** - * Specifies the format that the model must output. Compatible with + * Body param: Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
@@ -708,48 +727,50 @@ export interface RunCreateParamsBase { response_format?: ThreadsAPI.AssistantResponseFormatOption | null; /** - * If `true`, returns a stream of events that happen during the Run as server-sent - * events, terminating when the Run enters a terminal state with a `data: [DONE]` - * message. + * Body param: If `true`, returns a stream of events that happen during the Run as + * server-sent events, terminating when the Run enters a terminal state with a + * `data: [DONE]` message. */ stream?: boolean | null; /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - * make the output more random, while lower values like 0.2 will make it more - * focused and deterministic. + * Body param: What sampling temperature to use, between 0 and 2. Higher values + * like 0.8 will make the output more random, while lower values like 0.2 will make + * it more focused and deterministic. */ temperature?: number | null; /** - * Controls which (if any) tool is called by the model. `none` means the model will - * not call any tools and instead generates a message. `auto` is the default value - * and means the model can pick between generating a message or calling one or more - * tools. `required` means the model must call one or more tools before responding - * to the user. Specifying a particular tool like `{"type": "file_search"}` or + * Body param: Controls which (if any) tool is called by the model. `none` means + * the model will not call any tools and instead generates a message. `auto` is the + * default value and means the model can pick between generating a message or + * calling one or more tools. `required` means the model must call one or more + * tools before responding to the user. Specifying a particular tool like + * `{"type": "file_search"}` or * `{"type": "function", "function": {"name": "my_function"}}` forces the model to * call that tool. 
*/ tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null; /** - * Override the tools the assistant can use for this run. This is useful for - * modifying the behavior on a per-run basis. + * Body param: Override the tools the assistant can use for this run. This is + * useful for modifying the behavior on a per-run basis. */ tools?: Array | null; /** - * An alternative to sampling with temperature, called nucleus sampling, where the - * model considers the results of the tokens with top_p probability mass. So 0.1 - * means only the tokens comprising the top 10% probability mass are considered. + * Body param: An alternative to sampling with temperature, called nucleus + * sampling, where the model considers the results of the tokens with top_p + * probability mass. So 0.1 means only the tokens comprising the top 10% + * probability mass are considered. * * We generally recommend altering this or temperature but not both. */ top_p?: number | null; /** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. + * Body param: Controls for how a thread will be truncated prior to the run. Use + * this to control the intial context window of the run. */ truncation_strategy?: RunCreateParams.TruncationStrategy | null; } @@ -834,18 +855,18 @@ export namespace RunCreateParams { export interface RunCreateParamsNonStreaming extends RunCreateParamsBase { /** - * If `true`, returns a stream of events that happen during the Run as server-sent - * events, terminating when the Run enters a terminal state with a `data: [DONE]` - * message. + * Body param: If `true`, returns a stream of events that happen during the Run as + * server-sent events, terminating when the Run enters a terminal state with a + * `data: [DONE]` message. 
*/ stream?: false | null; } export interface RunCreateParamsStreaming extends RunCreateParamsBase { /** - * If `true`, returns a stream of events that happen during the Run as server-sent - * events, terminating when the Run enters a terminal state with a `data: [DONE]` - * message. + * Body param: If `true`, returns a stream of events that happen during the Run as + * server-sent events, terminating when the Run enters a terminal state with a + * `data: [DONE]` message. */ stream: true; } @@ -1630,10 +1651,12 @@ export namespace Runs { export import RunStepDelta = StepsAPI.RunStepDelta; export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; + export import RunStepInclude = StepsAPI.RunStepInclude; export import ToolCall = StepsAPI.ToolCall; export import ToolCallDelta = StepsAPI.ToolCallDelta; export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; export import RunStepsPage = StepsAPI.RunStepsPage; + export import StepRetrieveParams = StepsAPI.StepRetrieveParams; export import StepListParams = StepsAPI.StepListParams; } diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index 09605d458..c076191a3 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -14,9 +14,27 @@ export class Steps extends APIResource { threadId: string, runId: string, stepId: string, + query?: StepRetrieveParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve( + threadId: string, + runId: string, + stepId: string, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve( + threadId: string, + runId: string, + stepId: string, + query: StepRetrieveParams | Core.RequestOptions = {}, options?: Core.RequestOptions, ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.retrieve(threadId, runId, stepId, {}, 
query); + } return this._client.get(`/threads/${threadId}/runs/${runId}/steps/${stepId}`, { + query, ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); @@ -229,7 +247,7 @@ export interface FileSearchToolCall { /** * For now, this is always going to be an empty object. */ - file_search: unknown; + file_search: FileSearchToolCall.FileSearch; /** * The type of tool call. This is always going to be `file_search` for this type of @@ -238,6 +256,82 @@ export interface FileSearchToolCall { type: 'file_search'; } +export namespace FileSearchToolCall { + /** + * For now, this is always going to be an empty object. + */ + export interface FileSearch { + /** + * The ranking options for the file search. + */ + ranking_options?: FileSearch.RankingOptions; + + /** + * The results of the file search. + */ + results?: Array; + } + + export namespace FileSearch { + /** + * The ranking options for the file search. + */ + export interface RankingOptions { + /** + * The ranker used for the file search. + */ + ranker: 'default_2024_08_21'; + + /** + * The score threshold for the file search. All values must be a floating point + * number between 0 and 1. + */ + score_threshold: number; + } + + /** + * A result instance of the file search. + */ + export interface Result { + /** + * The ID of the file that result was found in. + */ + file_id: string; + + /** + * The name of the file that result was found in. + */ + file_name: string; + + /** + * The score of the result. All values must be a floating point number between 0 + * and 1. + */ + score: number; + + /** + * The content of the result that was found. The content is only included if + * requested via the include query parameter. + */ + content?: Array; + } + + export namespace Result { + export interface Content { + /** + * The text content of the file. + */ + text?: string; + + /** + * The type of the content. 
+ */ + type?: 'text'; + } + } + } +} + export interface FileSearchToolCallDelta { /** * For now, this is always going to be an empty object. @@ -558,6 +652,8 @@ export namespace RunStepDeltaMessageDelta { } } +export type RunStepInclude = 'step_details.tool_calls[*].file_search.results[*].content'; + /** * Details of the Code Interpreter tool call the run step was involved in. */ @@ -602,6 +698,19 @@ export interface ToolCallsStepDetails { type: 'tool_calls'; } +export interface StepRetrieveParams { + /** + * A list of additional fields to include in the response. Currently the only + * supported value is `step_details.tool_calls[*].file_search.results[*].content` + * to fetch the file search result content. + * + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * for more information. + */ + include?: Array; +} + export interface StepListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place @@ -611,6 +720,17 @@ export interface StepListParams extends CursorPageParams { */ before?: string; + /** + * A list of additional fields to include in the response. Currently the only + * supported value is `step_details.tool_calls[*].file_search.results[*].content` + * to fetch the file search result content. + * + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * for more information. + */ + include?: Array; + /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending * order and `desc` for descending order. 
@@ -632,10 +752,12 @@ export namespace Steps { export import RunStepDelta = StepsAPI.RunStepDelta; export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; + export import RunStepInclude = StepsAPI.RunStepInclude; export import ToolCall = StepsAPI.ToolCall; export import ToolCallDelta = StepsAPI.ToolCallDelta; export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; export import RunStepsPage = StepsAPI.RunStepsPage; + export import StepRetrieveParams = StepsAPI.StepRetrieveParams; export import StepListParams = StepsAPI.StepListParams; } diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index a2fda7757..352d775c0 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -23,6 +23,7 @@ describe('resource runs', () => { test('create: required and optional params', async () => { const response = await client.beta.threads.runs.create('thread_id', { assistant_id: 'assistant_id', + include: ['step_details.tool_calls[*].file_search.results[*].content'], additional_instructions: 'additional_instructions', additional_messages: [ { diff --git a/tests/api-resources/beta/threads/runs/steps.test.ts b/tests/api-resources/beta/threads/runs/steps.test.ts index 21487c17b..64cd228ae 100644 --- a/tests/api-resources/beta/threads/runs/steps.test.ts +++ b/tests/api-resources/beta/threads/runs/steps.test.ts @@ -29,6 +29,19 @@ describe('resource steps', () => { ).rejects.toThrow(OpenAI.NotFoundError); }); + test('retrieve: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.beta.threads.runs.steps.retrieve( + 'thread_id', + 'run_id', + 'step_id', 
+ { include: ['step_details.tool_calls[*].file_search.results[*].content'] }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + test('list', async () => { const responsePromise = client.beta.threads.runs.steps.list('thread_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); @@ -53,7 +66,13 @@ describe('resource steps', () => { client.beta.threads.runs.steps.list( 'thread_id', 'run_id', - { after: 'after', before: 'before', limit: 0, order: 'asc' }, + { + after: 'after', + before: 'before', + include: ['step_details.tool_calls[*].file_search.results[*].content'], + limit: 0, + order: 'asc', + }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); diff --git a/tests/stringifyQuery.test.ts b/tests/stringifyQuery.test.ts index 724743f30..e5f3e560a 100644 --- a/tests/stringifyQuery.test.ts +++ b/tests/stringifyQuery.test.ts @@ -20,10 +20,4 @@ describe(stringifyQuery, () => { expect(stringifyQuery(input)).toEqual(expected); }); } - - for (const value of [[], {}, new Date()]) { - it(`${JSON.stringify(value)} -> `, () => { - expect(() => stringifyQuery({ value })).toThrow(`Cannot stringify type ${typeof value}`); - }); - } }); diff --git a/yarn.lock b/yarn.lock index 1b0863df1..c916c0a6a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -881,6 +881,11 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.18.tgz#8dfb97f0da23c2293e554c5a50d61ef134d7697f" integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA== +"@types/qs@^6.9.7": + version "6.9.15" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.15.tgz#adde8a060ec9c305a82de1babc1056e73bd64dce" + integrity sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg== + "@types/semver@^7.5.0": version "7.5.3" resolved 
"https://registry.yarnpkg.com/@types/semver/-/semver-7.5.3.tgz#9a726e116beb26c24f1ccd6850201e1246122e04" @@ -1243,6 +1248,17 @@ bundle-name@^3.0.0: dependencies: run-applescript "^5.0.0" +call-bind@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" + integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + set-function-length "^1.2.1" + callsites@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" @@ -1432,6 +1448,15 @@ default-browser@^4.0.0: execa "^7.1.1" titleize "^3.0.0" +define-data-property@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" + integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + gopd "^1.0.1" + define-lazy-prop@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz#dbb19adfb746d7fc6d734a06b72f4a00d021255f" @@ -1498,6 +1523,18 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" +es-define-property@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845" + integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ== + dependencies: + get-intrinsic "^1.2.4" + +es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + 
integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + escalade@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" @@ -1845,6 +1882,17 @@ get-caller-file@^2.0.5: resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== +get-intrinsic@^1.1.3, get-intrinsic@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" + integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + has-proto "^1.0.1" + has-symbols "^1.0.3" + hasown "^2.0.0" + get-package-type@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" @@ -1910,6 +1958,13 @@ globby@^11.1.0: merge2 "^1.4.1" slash "^3.0.0" +gopd@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" + integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA== + dependencies: + get-intrinsic "^1.1.3" + graceful-fs@^4.2.9: version "4.2.11" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" @@ -1930,6 +1985,23 @@ has-flag@^4.0.0: resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== +has-property-descriptors@^1.0.2: + version 
"1.0.2" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" + integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== + dependencies: + es-define-property "^1.0.0" + +has-proto@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" + integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== + +has-symbols@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" + integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== + hasown@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" @@ -2747,6 +2819,11 @@ npm-run-path@^5.1.0: dependencies: path-key "^4.0.0" +object-inspect@^1.13.1: + version "1.13.2" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff" + integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g== + once@^1.3.0: version "1.4.0" resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" @@ -2960,6 +3037,13 @@ pure-rand@^6.0.0: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.0.4.tgz#50b737f6a925468679bff00ad20eade53f37d5c7" integrity sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA== +qs@^6.10.3: + version "6.13.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" + integrity 
sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg== + dependencies: + side-channel "^1.0.6" + queue-microtask@^1.2.2: version "1.2.3" resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" @@ -3058,6 +3142,18 @@ semver@^7.5.3, semver@^7.5.4: dependencies: lru-cache "^6.0.0" +set-function-length@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" + integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + gopd "^1.0.1" + has-property-descriptors "^1.0.2" + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" @@ -3070,6 +3166,16 @@ shebang-regex@^3.0.0: resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== +side-channel@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" + integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + object-inspect "^1.13.1" + signal-exit@^3.0.3, signal-exit@^3.0.7: version "3.0.7" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" From a01d574a521c0803fc0dfe14f25ae582a1a8e1d5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 29 Aug 2024 18:16:24 +0200 Subject: [PATCH 220/533] fix: install examples deps as part of bootstrap script (#1022) Co-authored-by: Samuel El-Borai --- .github/workflows/ci.yml | 8 ++++---- examples/logprobs.ts | 2 +- scripts/bootstrap | 3 +++ 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 333139a53..d6798e38a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,8 +22,8 @@ jobs: with: node-version: '18' - - name: Install dependencies - run: yarn install + - name: Bootstrap + run: ./scripts/bootstrap - name: Check types run: ./scripts/lint @@ -41,8 +41,8 @@ jobs: with: node-version: '18' - - name: Install dependencies - run: yarn install + - name: Bootstrap + run: ./scripts/bootstrap - name: Check build run: ./scripts/build diff --git a/examples/logprobs.ts b/examples/logprobs.ts index 5a4daf7de..8cf274a14 100755 --- a/examples/logprobs.ts +++ b/examples/logprobs.ts @@ -13,7 +13,7 @@ async function main() { stream: true, logprobs: true, }) - .on('logprob', (logprob) => { + .on('logprobs.content.delta', (logprob) => { console.log(logprob); }); diff --git a/scripts/bootstrap b/scripts/bootstrap index 05dd47a61..033156d3a 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -16,3 +16,6 @@ echo "==> Installing Node dependencies…" PACKAGE_MANAGER=$(command -v yarn >/dev/null 2>&1 && echo "yarn" || echo "npm") $PACKAGE_MANAGER install + +cd "$(dirname "$0")/../examples" +$PACKAGE_MANAGER install \ No newline at end of file From 3ba96a77eb314f908efc615b7333fc0f1729bdb7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:16:44 +0000 Subject: [PATCH 221/533] release: 4.57.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 
files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 780c9d947..83a9352d7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.56.2" + ".": "4.57.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b6420ffe3..680b164cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.57.0 (2024-08-29) + +Full Changelog: [v4.56.2...v4.57.0](https://github.com/openai/openai-node/compare/v4.56.2...v4.57.0) + +### Features + +* **api:** add file search result details to run steps ([#1023](https://github.com/openai/openai-node/issues/1023)) ([d9acd0a](https://github.com/openai/openai-node/commit/d9acd0a2c52b27983f8db6a8de6a776078b1d41b)) + + +### Bug Fixes + +* install examples deps as part of bootstrap script ([#1022](https://github.com/openai/openai-node/issues/1022)) ([eae8e36](https://github.com/openai/openai-node/commit/eae8e36fd5514eb60773646ec775badde50e783c)) + ## 4.56.2 (2024-08-29) Full Changelog: [v4.56.1...v4.56.2](https://github.com/openai/openai-node/compare/v4.56.1...v4.56.2) diff --git a/README.md b/README.md index 80a266fb2..c648d5a6e 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.56.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.57.0/mod.ts'; ``` diff --git a/package.json b/package.json index e4d595eae..d481813db 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.56.2", + "version": "4.57.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 14f03b5e7..7a646d14d 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g 
Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.56.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.57.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 8d5cfea61..99e4b86d6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.56.2'; // x-release-please-version +export const VERSION = '4.57.0'; // x-release-please-version From dfad54d9215d0dd70c0b588a6d54f1cf4645535f Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 3 Sep 2024 13:10:07 +0100 Subject: [PATCH 222/533] chore(internal/tests): workaround bug in recent types/node release --- ecosystem-tests/node-ts-cjs/package-lock.json | 19 +++++-------------- ecosystem-tests/node-ts-cjs/package.json | 5 ++++- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/ecosystem-tests/node-ts-cjs/package-lock.json b/ecosystem-tests/node-ts-cjs/package-lock.json index c9493b515..2f5374e35 100644 --- a/ecosystem-tests/node-ts-cjs/package-lock.json +++ b/ecosystem-tests/node-ts-cjs/package-lock.json @@ -13,7 +13,7 @@ "tsconfig-paths": "^4.0.0" }, "devDependencies": { - "@types/node": "^20.4.2", + "@types/node": "20.4.2", "@types/node-fetch": "^2.6.1", "@types/ws": "^8.5.4", "fastest-levenshtein": "^1.0.16", @@ -1135,13 +1135,10 @@ } }, "node_modules/@types/node": { - "version": "20.11.30", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz", - "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==", - "dev": true, - "dependencies": { - "undici-types": "~5.26.4" - } + "version": "20.4.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.4.2.tgz", + "integrity": "sha512-Dd0BYtWgnWJKwO1jkmTrzofjK2QXXcai0dmtzvIBhcA+RsG5h8R3xlyta0kGOZRNfL9GuRtb1knmPEhQrePCEw==", + "dev": true }, "node_modules/@types/node-fetch": { "version": "2.6.11", @@ -4233,12 +4230,6 @@ "node": ">=4.2.0" } 
}, - "node_modules/undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", - "dev": true - }, "node_modules/universalify": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", diff --git a/ecosystem-tests/node-ts-cjs/package.json b/ecosystem-tests/node-ts-cjs/package.json index 76f866b0b..039b37a3d 100644 --- a/ecosystem-tests/node-ts-cjs/package.json +++ b/ecosystem-tests/node-ts-cjs/package.json @@ -13,7 +13,7 @@ "tsconfig-paths": "^4.0.0" }, "devDependencies": { - "@types/node": "^20.4.2", + "@types/node": "20.4.2", "@types/node-fetch": "^2.6.1", "@types/ws": "^8.5.4", "fastest-levenshtein": "^1.0.16", @@ -22,5 +22,8 @@ "text-encoding-polyfill": "^0.6.7", "ts-jest": "^29.1.0", "typescript": "4.7.4" + }, + "overrides": { + "@types/node": "20.4.2" } } From 2083545a0f906870c66c9fb59259206d55887eb3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 12:40:03 +0000 Subject: [PATCH 223/533] fix(client): correct File construction from node-fetch Responses (#1029) --- src/uploads.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/uploads.ts b/src/uploads.ts index 081827c9a..a920351cd 100644 --- a/src/uploads.ts +++ b/src/uploads.ts @@ -114,7 +114,12 @@ export async function toFile( const blob = await value.blob(); name ||= new URL(value.url).pathname.split(/[\\/]/).pop() ?? 'unknown_file'; - return new File([blob as any], name, options); + // we need to convert the `Blob` into an array buffer because the `Blob` class + // that `node-fetch` defines is incompatible with the web standard which results + // in `new File` interpreting it as a string instead of binary data. + const data = isBlobLike(blob) ? 
[(await blob.arrayBuffer()) as any] : [blob]; + + return new File(data, name, options); } const bits = await getBytes(value); From be5269d2ac03dc5cae12455a4650699318e86179 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 3 Sep 2024 14:16:39 +0100 Subject: [PATCH 224/533] fix(assistants): correctly accumulate tool calls when streaming (#1031) * fix(accumulateDelta): AssistantStream accumulateDelta toolCall (#771) * minor style changes --------- Co-authored-by: A4F54B --- src/lib/AssistantStream.ts | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index 32cde3e7a..7c5ffb58e 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -684,6 +684,30 @@ export class AssistantStream accValue.push(...deltaValue); // Use spread syntax for efficient addition continue; } + + for (const deltaEntry of deltaValue) { + if (!Core.isObj(deltaEntry)) { + throw new Error(`Expected array delta entry to be an object but got: ${deltaEntry}`); + } + + const index = deltaEntry['index']; + if (index == null) { + console.error(deltaEntry); + throw new Error('Expected array delta entry to have an `index` property'); + } + + if (typeof index !== 'number') { + throw new Error(`Expected array delta entry \`index\` property to be a number but got ${index}`); + } + + const accEntry = accValue[index]; + if (accEntry == null) { + accValue.push(deltaEntry); + } else { + accValue[index] = this.accumulateDelta(accEntry, deltaEntry); + } + } + continue; } else { throw Error(`Unhandled record type: ${key}, deltaValue: ${deltaValue}, accValue: ${accValue}`); } From 3e10b85f09865ba5a0f31b41605807d19563b6cc Mon Sep 17 00:00:00 2001 From: Young Jin Park Date: Tue, 3 Sep 2024 07:12:11 -0700 Subject: [PATCH 225/533] fix: runTools without stream should not emit user message events (#1005) --- src/lib/ChatCompletionRunner.ts | 8 ++++++-- tests/lib/ChatCompletionRunFunctions.test.ts | 12 ------------ 2 
files changed, 6 insertions(+), 14 deletions(-) diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts index 8139c577b..0b962a110 100644 --- a/src/lib/ChatCompletionRunner.ts +++ b/src/lib/ChatCompletionRunner.ts @@ -63,8 +63,12 @@ export class ChatCompletionRunner extends AbstractChatCompletion return runner; } - override _addMessage(this: ChatCompletionRunner, message: ChatCompletionMessageParam) { - super._addMessage(message); + override _addMessage( + this: ChatCompletionRunner, + message: ChatCompletionMessageParam, + emit: boolean = true, + ) { + super._addMessage(message, emit); if (isAssistantMessage(message) && message.content) { this._emit('content', message.content as string); } diff --git a/tests/lib/ChatCompletionRunFunctions.test.ts b/tests/lib/ChatCompletionRunFunctions.test.ts index cddfe4a5f..b684f204d 100644 --- a/tests/lib/ChatCompletionRunFunctions.test.ts +++ b/tests/lib/ChatCompletionRunFunctions.test.ts @@ -605,7 +605,6 @@ describe('resource completions', () => { await runner.done(); expect(listener.messages).toEqual([ - { role: 'user', content: 'tell me what the weather is like' }, { role: 'assistant', content: null, @@ -700,7 +699,6 @@ describe('resource completions', () => { await runner.done().catch(() => {}); expect(listener.messages).toEqual([ - { role: 'user', content: 'tell me what the weather is like' }, { role: 'assistant', content: null, @@ -855,10 +853,6 @@ describe('resource completions', () => { await runner.done(); expect(listener.messages).toEqual([ - { - role: 'user', - content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', - }, { role: 'assistant', content: null, @@ -1090,10 +1084,6 @@ describe('resource completions', () => { ]); expect(listener.messages).toEqual([ - { - role: 'user', - content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', - }, { role: 'assistant', content: null, @@ -1207,7 +1197,6 @@ describe('resource completions', () => { ]); 
expect(listener.messages).toEqual([ - { role: 'user', content: 'tell me what the weather is like' }, { role: 'assistant', content: null, @@ -1413,7 +1402,6 @@ describe('resource completions', () => { ]); expect(listener.messages).toEqual([ - { role: 'user', content: 'tell me what the weather is like' }, { role: 'assistant', content: null, From 5e29cb2f2825d5abcdb536d843dcafe4ee980698 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 14:12:33 +0000 Subject: [PATCH 226/533] release: 4.57.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 20 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 83a9352d7..e38854037 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.57.0" + ".": "4.57.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 680b164cb..b38d80c3b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.57.1 (2024-09-03) + +Full Changelog: [v4.57.0...v4.57.1](https://github.com/openai/openai-node/compare/v4.57.0...v4.57.1) + +### Bug Fixes + +* **assistants:** correctly accumulate tool calls when streaming ([#1031](https://github.com/openai/openai-node/issues/1031)) ([d935ad3](https://github.com/openai/openai-node/commit/d935ad3fa37b2701f4c9f6e433ada6074280a871)) +* **client:** correct File construction from node-fetch Responses ([#1029](https://github.com/openai/openai-node/issues/1029)) ([22ebdc2](https://github.com/openai/openai-node/commit/22ebdc2ca7d98e0f266110c4cf827e53a0a22026)) +* runTools without stream should not emit user message events ([#1005](https://github.com/openai/openai-node/issues/1005)) ([22ded4d](https://github.com/openai/openai-node/commit/22ded4d549a157482a8de2faf65e92c5be07fa95)) + + +### Chores + 
+* **internal/tests:** workaround bug in recent types/node release ([3c7bdfd](https://github.com/openai/openai-node/commit/3c7bdfdb373bff77db0e3fecd5d49ddfa4284cd9)) + ## 4.57.0 (2024-08-29) Full Changelog: [v4.56.2...v4.57.0](https://github.com/openai/openai-node/compare/v4.56.2...v4.57.0) diff --git a/README.md b/README.md index c648d5a6e..198a6e584 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.57.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.57.1/mod.ts'; ``` diff --git a/package.json b/package.json index d481813db..e0721e4d9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.57.0", + "version": "4.57.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 7a646d14d..f4920e507 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.57.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.57.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 99e4b86d6..5967551de 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.57.0'; // x-release-please-version +export const VERSION = '4.57.1'; // x-release-please-version From 09e20df50d7dc97b49a0d3ed4e7256afa605ff55 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 14:21:55 +0000 Subject: [PATCH 227/533] chore(internal): dependency updates (#1035) --- yarn.lock | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/yarn.lock b/yarn.lock index 
c916c0a6a..c18ab42a0 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1205,12 +1205,12 @@ brace-expansion@^2.0.1: dependencies: balanced-match "^1.0.0" -braces@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== +braces@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: - fill-range "^7.0.1" + fill-range "^7.1.1" browserslist@^4.22.2: version "4.22.2" @@ -1799,10 +1799,10 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" @@ -2710,11 +2710,11 @@ merge2@^1.3.0, merge2@^1.4.1: integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== micromatch@^4.0.4: - version "4.0.5" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" - integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + version "4.0.8" + resolved 
"https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== dependencies: - braces "^3.0.2" + braces "^3.0.3" picomatch "^2.3.1" mime-db@1.51.0: From f919b8ea7e456e118a4b8d41151e63531c843857 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 14:22:24 +0000 Subject: [PATCH 228/533] release: 4.57.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e38854037..3530ae9dc 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.57.1" + ".": "4.57.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b38d80c3b..aa81491dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.57.2 (2024-09-04) + +Full Changelog: [v4.57.1...v4.57.2](https://github.com/openai/openai-node/compare/v4.57.1...v4.57.2) + +### Chores + +* **internal:** dependency updates ([#1035](https://github.com/openai/openai-node/issues/1035)) ([e815fb6](https://github.com/openai/openai-node/commit/e815fb6dee75219563d3a7776774ba1c2984560e)) + ## 4.57.1 (2024-09-03) Full Changelog: [v4.57.0...v4.57.1](https://github.com/openai/openai-node/compare/v4.57.0...v4.57.1) diff --git a/README.md b/README.md index 198a6e584..d062feff0 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.57.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.57.2/mod.ts'; ``` diff --git a/package.json b/package.json index e0721e4d9..17cdac037 100644 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.57.1", + "version": "4.57.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index f4920e507..3a67aa66a 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.57.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.57.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 5967551de..f625b0bf6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.57.1'; // x-release-please-version +export const VERSION = '4.57.2'; // x-release-please-version From 195fff86be2ec0e799a22bf09a7cbf4175d47a26 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Wed, 4 Sep 2024 19:43:42 +0000 Subject: [PATCH 229/533] chore(internal): minor bump qs version (#1037) --- package.json | 2 +- yarn.lock | 28 ++++++++++++++-------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/package.json b/package.json index 17cdac037..55997318d 100644 --- a/package.json +++ b/package.json @@ -26,7 +26,7 @@ "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", - "@types/qs": "^6.9.7", + "@types/qs": "^6.9.15", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", diff --git a/yarn.lock b/yarn.lock index c18ab42a0..3c7bdb93e 100644 --- a/yarn.lock +++ b/yarn.lock @@ -881,7 +881,7 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.18.tgz#8dfb97f0da23c2293e554c5a50d61ef134d7697f" integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA== -"@types/qs@^6.9.7": +"@types/qs@^6.9.15": version "6.9.15" resolved 
"https://registry.yarnpkg.com/@types/qs/-/qs-6.9.15.tgz#adde8a060ec9c305a82de1babc1056e73bd64dce" integrity sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg== @@ -1205,12 +1205,12 @@ brace-expansion@^2.0.1: dependencies: balanced-match "^1.0.0" -braces@^3.0.3: - version "3.0.3" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" - integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== +braces@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== dependencies: - fill-range "^7.1.1" + fill-range "^7.0.1" browserslist@^4.22.2: version "4.22.2" @@ -1799,10 +1799,10 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -fill-range@^7.1.1: - version "7.1.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" - integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== dependencies: to-regex-range "^5.0.1" @@ -2710,11 +2710,11 @@ merge2@^1.3.0, merge2@^1.4.1: integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== micromatch@^4.0.4: - version "4.0.8" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" - integrity 
sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== + version "4.0.5" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== dependencies: - braces "^3.0.3" + braces "^3.0.2" picomatch "^2.3.1" mime-db@1.51.0: From 1e9808ac68a22eb5fa3640a5b33d27af122f50a0 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 4 Sep 2024 20:46:58 +0100 Subject: [PATCH 230/533] fix(helpers/zod): avoid import issue in certain environments (#1039) --- ecosystem-tests/cli.ts | 5 + ecosystem-tests/node-ts-es2020/index.ts | 36 ++++ .../node-ts-es2020/package-lock.json | 193 ++++++++++++++++++ ecosystem-tests/node-ts-es2020/package.json | 16 ++ .../node-ts-es2020/tsconfig.base.json | 37 ++++ ecosystem-tests/node-ts-es2020/tsconfig.json | 18 ++ .../node-ts-es2020/tsconfig.nodenext.json | 55 +++++ src/helpers/zod.ts | 16 +- 8 files changed, 368 insertions(+), 8 deletions(-) create mode 100644 ecosystem-tests/node-ts-es2020/index.ts create mode 100644 ecosystem-tests/node-ts-es2020/package-lock.json create mode 100644 ecosystem-tests/node-ts-es2020/package.json create mode 100644 ecosystem-tests/node-ts-es2020/tsconfig.base.json create mode 100644 ecosystem-tests/node-ts-es2020/tsconfig.json create mode 100644 ecosystem-tests/node-ts-es2020/tsconfig.nodenext.json diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index e315ccd6c..2d9702112 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -25,6 +25,11 @@ const projectRunners = { 'node-ts-esm': defaultNodeRunner, 'node-ts-esm-web': defaultNodeRunner, 'node-ts-esm-auto': defaultNodeRunner, + 'node-ts-es2020': async () => { + await installPackage(); + await run('npm', ['run', 'tsc']); + await run('npm', ['run', 'main']); + }, 'node-js': async () => { await installPackage(); await run('node', 
['test.js']); diff --git a/ecosystem-tests/node-ts-es2020/index.ts b/ecosystem-tests/node-ts-es2020/index.ts new file mode 100644 index 000000000..d92cc2720 --- /dev/null +++ b/ecosystem-tests/node-ts-es2020/index.ts @@ -0,0 +1,36 @@ +import { zodResponseFormat } from 'openai/helpers/zod'; +import OpenAI from 'openai/index'; +import { z } from 'zod'; + +const Step = z.object({ + explanation: z.string(), + output: z.string(), +}); + +const MathResponse = z.object({ + steps: z.array(Step), + final_answer: z.string(), +}); + +async function main() { + const client = new OpenAI(); + + const completion = await client.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { role: 'system', content: 'You are a helpful math tutor.' }, + { role: 'user', content: 'solve 8x + 31 = 2' }, + ], + response_format: zodResponseFormat(MathResponse, 'math_response'), + }); + + console.dir(completion, { depth: 5 }); + + const message = completion.choices[0]?.message; + if (message?.parsed) { + console.log(message.parsed.steps); + console.log(`answer: ${message.parsed.final_answer}`); + } +} + +main(); diff --git a/ecosystem-tests/node-ts-es2020/package-lock.json b/ecosystem-tests/node-ts-es2020/package-lock.json new file mode 100644 index 000000000..5ae1d5aa0 --- /dev/null +++ b/ecosystem-tests/node-ts-es2020/package-lock.json @@ -0,0 +1,193 @@ +{ + "name": "node-ts-es2020", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "node-ts-es2020", + "version": "1.0.0", + "dependencies": { + "ts-node": "^10.9.2", + "zod": "^3.23.8" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": 
">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", + "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", + "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==" + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==" + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==" + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": 
"https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==" + }, + "node_modules/@types/node": { + "version": "20.4.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.4.2.tgz", + "integrity": "sha512-Dd0BYtWgnWJKwO1jkmTrzofjK2QXXcai0dmtzvIBhcA+RsG5h8R3xlyta0kGOZRNfL9GuRtb1knmPEhQrePCEw==", + "peer": true + }, + "node_modules/acorn": { + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==" + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==" + }, + "node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": 
"https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==" + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/typescript": { + "version": "4.7.4", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.7.4.tgz", + "integrity": "sha512-C0WQT0gezHuw6AdY1M2jxUO83Rjf0HP7Sk1DtXj6j1EwkQNZrHAg2XPWlq62oqEhYvONq5pkC2Y9oPljWToLmQ==", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=4.2.0" + } + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==" + 
}, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/zod": { + "version": "3.23.8", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz", + "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/ecosystem-tests/node-ts-es2020/package.json b/ecosystem-tests/node-ts-es2020/package.json new file mode 100644 index 000000000..ea7e488f5 --- /dev/null +++ b/ecosystem-tests/node-ts-es2020/package.json @@ -0,0 +1,16 @@ +{ + "name": "node-ts-es2020", + "version": "1.0.0", + "main": "index.js", + "scripts": { + "tsc": "tsc && tsc -p tsconfig.nodenext.json && tsc -p node_modules/openai/src/tsconfig.json", + "main": "ts-node index.ts" + }, + "dependencies": { + "ts-node": "^10.9.2", + "zod": "^3.23.8" + }, + "overrides": { + "@types/node": "20.4.2" + } +} diff --git a/ecosystem-tests/node-ts-es2020/tsconfig.base.json b/ecosystem-tests/node-ts-es2020/tsconfig.base.json new file mode 100644 index 000000000..8edad9422 --- /dev/null +++ b/ecosystem-tests/node-ts-es2020/tsconfig.base.json @@ -0,0 +1,37 @@ +{ + "compilerOptions": { + "target": "es2020", + "module": "commonjs", + "lib": ["es2020", "dom"], + "allowJs": false, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "removeComments": true, + "forceConsistentCasingInFileNames": true, + "downlevelIteration": true, + "strict": true, + "moduleResolution": "node", + "paths": {}, + "typeRoots": ["node_modules/@types"], + "types": ["node"], + "allowSyntheticDefaultImports": false, + "experimentalDecorators": true, + "emitDecoratorMetadata": true, + "resolveJsonModule": true, + "incremental": true, + 
"strictBindCallApply": true, + "strictFunctionTypes": true, + "strictNullChecks": true, + "strictPropertyInitialization": true, + "noImplicitAny": true, + "noImplicitThis": true, + "noImplicitReturns": true, + "noUnusedParameters": true, + "noUnusedLocals": true, + "noFallthroughCasesInSwitch": true, + "preserveSymlinks": true, + "suppressImplicitAnyIndexErrors": true + }, + "exclude": ["node_modules"] +} diff --git a/ecosystem-tests/node-ts-es2020/tsconfig.json b/ecosystem-tests/node-ts-es2020/tsconfig.json new file mode 100644 index 000000000..aa3b68f53 --- /dev/null +++ b/ecosystem-tests/node-ts-es2020/tsconfig.json @@ -0,0 +1,18 @@ +{ + "extends": "./tsconfig.base.json", + "ts-node": { + "swc": true, + "transpileOnly": true + }, + "compilerOptions": { + "declaration": false, + "declarationMap": false, + "allowJs": true, + "checkJs": false, + "outDir": "./dist", + "baseUrl": "./", + "types": ["node", "jest"], + "paths": {} + }, + "include": ["index.ts", "tsconfig.json", "jest.config.ts", ".eslintrc.js"] +} diff --git a/ecosystem-tests/node-ts-es2020/tsconfig.nodenext.json b/ecosystem-tests/node-ts-es2020/tsconfig.nodenext.json new file mode 100644 index 000000000..97df071fb --- /dev/null +++ b/ecosystem-tests/node-ts-es2020/tsconfig.nodenext.json @@ -0,0 +1,55 @@ +{ + "include": ["tests/*.ts", "index.ts"], + "exclude": ["tests/*-shim-errors.ts"], + + "compilerOptions": { + /* Visit https://aka.ms/tsconfig.json to read more about this file */ + /* Projects */ + "incremental": true, + + /* Language and Environment */ + "target": "ES2015", + "lib": ["ES2015"], + "jsx": "react", + + /* Modules */ + "module": "commonjs", + "rootDir": "./", + "moduleResolution": "NodeNext", + "baseUrl": "./", + "paths": { + "~/*": ["*"] + }, + "resolveJsonModule": true, + "composite": true, + + /* Emit */ + "outDir": "node_modules", + "noEmit": true, + + /* Interop Constraints */ + "isolatedModules": true, + "allowSyntheticDefaultImports": true, + /* "esModuleInterop": true, */ + 
"forceConsistentCasingInFileNames": true, + "allowJs": true, + "checkJs": true, + + /* Experimental Features */ + "experimentalDecorators": true, + + /* Type Checking */ + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "strictFunctionTypes": true, + "strictBindCallApply": true, + "strictPropertyInitialization": true, + "noImplicitThis": true, + "alwaysStrict": true, + "noUncheckedIndexedAccess": true, + "noImplicitOverride": true, + "noPropertyAccessFromIndexSignature": true, + "skipLibCheck": false + } +} diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts index 463ef588c..99b9eb4b0 100644 --- a/src/helpers/zod.ts +++ b/src/helpers/zod.ts @@ -1,5 +1,5 @@ -import { ResponseFormatJSONSchema } from 'openai/resources'; -import type z from 'zod'; +import { ResponseFormatJSONSchema } from '../resources/index'; +import type { infer as zodInfer, ZodType } from 'zod'; import { AutoParseableResponseFormat, AutoParseableTool, @@ -8,7 +8,7 @@ import { } from '../lib/parser'; import { zodToJsonSchema as _zodToJsonSchema } from '../_vendor/zod-to-json-schema'; -function zodToJsonSchema(schema: z.ZodType, options: { name: string }): Record { +function zodToJsonSchema(schema: ZodType, options: { name: string }): Record { return _zodToJsonSchema(schema, { openaiStrictMode: true, name: options.name, @@ -55,11 +55,11 @@ function zodToJsonSchema(schema: z.ZodType, options: { name: string }): Record( +export function zodResponseFormat( zodObject: ZodInput, name: string, props?: Omit, -): AutoParseableResponseFormat> { +): AutoParseableResponseFormat> { return makeParseableResponseFormat( { type: 'json_schema', @@ -79,15 +79,15 @@ export function zodResponseFormat( * automatically by the chat completion `.runTools()` method or automatically * parsed by `.parse()` / `.stream()`. 
*/ -export function zodFunction(options: { +export function zodFunction(options: { name: string; parameters: Parameters; - function?: ((args: z.infer) => unknown | Promise) | undefined; + function?: ((args: zodInfer) => unknown | Promise) | undefined; description?: string | undefined; }): AutoParseableTool<{ arguments: Parameters; name: string; - function: (args: z.infer) => unknown; + function: (args: zodInfer) => unknown; }> { // @ts-expect-error TODO return makeParseableTool( From 0c0b6c9e9681dc7c26ffa3e6101b6473d651e69a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 19:47:18 +0000 Subject: [PATCH 231/533] release: 4.57.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3530ae9dc..5876e9cf1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.57.2" + ".": "4.57.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index aa81491dc..c92fb6e61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.57.3 (2024-09-04) + +Full Changelog: [v4.57.2...v4.57.3](https://github.com/openai/openai-node/compare/v4.57.2...v4.57.3) + +### Bug Fixes + +* **helpers/zod:** avoid import issue in certain environments ([#1039](https://github.com/openai/openai-node/issues/1039)) ([e238daa](https://github.com/openai/openai-node/commit/e238daa7c12f3fb13369f58b9d405365f5efcc8f)) + + +### Chores + +* **internal:** minor bump qs version ([#1037](https://github.com/openai/openai-node/issues/1037)) ([8ec218e](https://github.com/openai/openai-node/commit/8ec218e9efb657927b3c0346822a96872aeaf137)) + ## 4.57.2 (2024-09-04) Full Changelog: 
[v4.57.1...v4.57.2](https://github.com/openai/openai-node/compare/v4.57.1...v4.57.2) diff --git a/README.md b/README.md index d062feff0..0c89cbc7c 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.57.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.57.3/mod.ts'; ``` diff --git a/package.json b/package.json index 55997318d..5bb52f130 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.57.2", + "version": "4.57.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 3a67aa66a..2aedee39e 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.57.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.57.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index f625b0bf6..5292f3437 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.57.2'; // x-release-please-version +export const VERSION = '4.57.3'; // x-release-please-version From 1d475417c43771e2ac2e153b6cd1e445042f74b3 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 5 Sep 2024 21:26:44 +0000 Subject: [PATCH 232/533] feat(vector store): improve chunking strategy type names (#1041) --- .stats.yml | 2 +- api.md | 7 ++ src/resources/beta/assistants.ts | 44 +------ src/resources/beta/beta.ts | 7 ++ src/resources/beta/index.ts | 7 ++ src/resources/beta/threads/threads.ts | 87 +------------- .../beta/vector-stores/file-batches.ts | 46 +------ src/resources/beta/vector-stores/files.ts | 86 +------------ src/resources/beta/vector-stores/index.ts | 7 ++ 
.../beta/vector-stores/vector-stores.ts | 113 ++++++++++++------ 10 files changed, 119 insertions(+), 287 deletions(-) diff --git a/.stats.yml b/.stats.yml index fd4f27136..903c15996 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-1dbac0e95bdb5a89a0dd3d93265475a378214551b7d8c22862928e0d87ace94b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-85a85e0c08de456441431c0ae4e9c078cc8f9748c29430b9a9058340db6389ee.yml diff --git a/api.md b/api.md index 936f64196..7fb8f86a6 100644 --- a/api.md +++ b/api.md @@ -199,6 +199,13 @@ Methods: Types: +- AutoFileChunkingStrategyParam +- FileChunkingStrategy +- FileChunkingStrategyParam +- OtherFileChunkingStrategyObject +- StaticFileChunkingStrategy +- StaticFileChunkingStrategyObject +- StaticFileChunkingStrategyParam - VectorStore - VectorStoreDeleted diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 924d63d5c..0dbb076d5 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -8,6 +8,7 @@ import * as Shared from '../shared'; import * as ChatAPI from '../chat/chat'; import * as MessagesAPI from './threads/messages'; import * as ThreadsAPI from './threads/threads'; +import * as VectorStoresAPI from './vector-stores/vector-stores'; import * as RunsAPI from './threads/runs/runs'; import * as StepsAPI from './threads/runs/steps'; import { CursorPage, type CursorPageParams } from '../../pagination'; @@ -1218,9 +1219,9 @@ export namespace AssistantCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. + * strategy. Only applicable if `file_ids` is non-empty. 
*/ - chunking_strategy?: VectorStore.Auto | VectorStore.Static; + chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -1237,45 +1238,6 @@ export namespace AssistantCreateParams { */ metadata?: unknown; } - - export namespace VectorStore { - /** - * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - * `800` and `chunk_overlap_tokens` of `400`. - */ - export interface Auto { - /** - * Always `auto`. - */ - type: 'auto'; - } - - export interface Static { - static: Static.Static; - - /** - * Always `static`. - */ - type: 'static'; - } - - export namespace Static { - export interface Static { - /** - * The number of tokens that overlap between chunks. The default value is `400`. - * - * Note that the overlap must not exceed half of `max_chunk_size_tokens`. - */ - chunk_overlap_tokens: number; - - /** - * The maximum number of tokens in each chunk. The default value is `800`. The - * minimum value is `100` and the maximum value is `4096`. 
- */ - max_chunk_size_tokens: number; - } - } - } } } } diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 4993d02fb..0bcf217a8 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -15,6 +15,13 @@ export class Beta extends APIResource { export namespace Beta { export import VectorStores = VectorStoresAPI.VectorStores; + export import AutoFileChunkingStrategyParam = VectorStoresAPI.AutoFileChunkingStrategyParam; + export import FileChunkingStrategy = VectorStoresAPI.FileChunkingStrategy; + export import FileChunkingStrategyParam = VectorStoresAPI.FileChunkingStrategyParam; + export import OtherFileChunkingStrategyObject = VectorStoresAPI.OtherFileChunkingStrategyObject; + export import StaticFileChunkingStrategy = VectorStoresAPI.StaticFileChunkingStrategy; + export import StaticFileChunkingStrategyObject = VectorStoresAPI.StaticFileChunkingStrategyObject; + export import StaticFileChunkingStrategyParam = VectorStoresAPI.StaticFileChunkingStrategyParam; export import VectorStore = VectorStoresAPI.VectorStore; export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 392be1f35..9fcf805a1 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -37,6 +37,13 @@ export { export { Beta } from './beta'; export { Chat } from './chat/index'; export { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyParam, VectorStore, VectorStoreDeleted, VectorStoreCreateParams, diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index b4551da76..c49618f0c 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -10,6 +10,7 @@ 
import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; +import * as VectorStoresAPI from '../vector-stores/vector-stores'; import * as RunsAPI from './runs/runs'; import { Stream } from '../../../streaming'; @@ -379,9 +380,9 @@ export namespace ThreadCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. + * strategy. Only applicable if `file_ids` is non-empty. */ - chunking_strategy?: VectorStore.Auto | VectorStore.Static; + chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -398,45 +399,6 @@ export namespace ThreadCreateParams { */ metadata?: unknown; } - - export namespace VectorStore { - /** - * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - * `800` and `chunk_overlap_tokens` of `400`. - */ - export interface Auto { - /** - * Always `auto`. - */ - type: 'auto'; - } - - export interface Static { - static: Static.Static; - - /** - * Always `static`. - */ - type: 'static'; - } - - export namespace Static { - export interface Static { - /** - * The number of tokens that overlap between chunks. The default value is `400`. - * - * Note that the overlap must not exceed half of `max_chunk_size_tokens`. - */ - chunk_overlap_tokens: number; - - /** - * The maximum number of tokens in each chunk. The default value is `800`. The - * minimum value is `100` and the maximum value is `4096`. - */ - max_chunk_size_tokens: number; - } - } - } } } } @@ -765,9 +727,9 @@ export namespace ThreadCreateAndRunParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. + * strategy. Only applicable if `file_ids` is non-empty. 
*/ - chunking_strategy?: VectorStore.Auto | VectorStore.Static; + chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -784,45 +746,6 @@ export namespace ThreadCreateAndRunParams { */ metadata?: unknown; } - - export namespace VectorStore { - /** - * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - * `800` and `chunk_overlap_tokens` of `400`. - */ - export interface Auto { - /** - * Always `auto`. - */ - type: 'auto'; - } - - export interface Static { - static: Static.Static; - - /** - * Always `static`. - */ - type: 'static'; - } - - export namespace Static { - export interface Static { - /** - * The number of tokens that overlap between chunks. The default value is `400`. - * - * Note that the overlap must not exceed half of `max_chunk_size_tokens`. - */ - chunk_overlap_tokens: number; - - /** - * The maximum number of tokens in each chunk. The default value is `800`. The - * minimum value is `100` and the maximum value is `4096`. - */ - max_chunk_size_tokens: number; - } - } - } } } } diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index e4a5c46fe..3436d7575 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -9,6 +9,7 @@ import * as Core from '../../../core'; import * as FileBatchesAPI from './file-batches'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; +import * as VectorStoresAPI from './vector-stores'; import { type CursorPageParams } from '../../../pagination'; export class FileBatches extends APIResource { @@ -267,50 +268,9 @@ export interface FileBatchCreateParams { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. + * strategy. Only applicable if `file_ids` is non-empty. 
*/ - chunking_strategy?: - | FileBatchCreateParams.AutoChunkingStrategyRequestParam - | FileBatchCreateParams.StaticChunkingStrategyRequestParam; -} - -export namespace FileBatchCreateParams { - /** - * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - * `800` and `chunk_overlap_tokens` of `400`. - */ - export interface AutoChunkingStrategyRequestParam { - /** - * Always `auto`. - */ - type: 'auto'; - } - - export interface StaticChunkingStrategyRequestParam { - static: StaticChunkingStrategyRequestParam.Static; - - /** - * Always `static`. - */ - type: 'static'; - } - - export namespace StaticChunkingStrategyRequestParam { - export interface Static { - /** - * The number of tokens that overlap between chunks. The default value is `400`. - * - * Note that the overlap must not exceed half of `max_chunk_size_tokens`. - */ - chunk_overlap_tokens: number; - - /** - * The maximum number of tokens in each chunk. The default value is `800`. The - * minimum value is `100` and the maximum value is `4096`. - */ - max_chunk_size_tokens: number; - } - } + chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; } export interface FileBatchListFilesParams extends CursorPageParams { diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index c0f695223..f82cd63df 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../../resource'; import { sleep, Uploadable, isRequestOptions } from '../../../core'; import * as Core from '../../../core'; import * as FilesAPI from './files'; +import * as VectorStoresAPI from './vector-stores'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Files extends APIResource { @@ -220,7 +221,7 @@ export interface VectorStoreFile { /** * The strategy used to chunk the file. 
*/ - chunking_strategy?: VectorStoreFile.Static | VectorStoreFile.Other; + chunking_strategy?: VectorStoresAPI.FileChunkingStrategy; } export namespace VectorStoreFile { @@ -239,44 +240,6 @@ export namespace VectorStoreFile { */ message: string; } - - export interface Static { - static: Static.Static; - - /** - * Always `static`. - */ - type: 'static'; - } - - export namespace Static { - export interface Static { - /** - * The number of tokens that overlap between chunks. The default value is `400`. - * - * Note that the overlap must not exceed half of `max_chunk_size_tokens`. - */ - chunk_overlap_tokens: number; - - /** - * The maximum number of tokens in each chunk. The default value is `800`. The - * minimum value is `100` and the maximum value is `4096`. - */ - max_chunk_size_tokens: number; - } - } - - /** - * This is returned when the chunking strategy is unknown. Typically, this is - * because the file was indexed before the `chunking_strategy` concept was - * introduced in the API. - */ - export interface Other { - /** - * Always `other`. - */ - type: 'other'; - } } export interface VectorStoreFileDeleted { @@ -297,50 +260,9 @@ export interface FileCreateParams { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. + * strategy. Only applicable if `file_ids` is non-empty. */ - chunking_strategy?: - | FileCreateParams.AutoChunkingStrategyRequestParam - | FileCreateParams.StaticChunkingStrategyRequestParam; -} - -export namespace FileCreateParams { - /** - * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - * `800` and `chunk_overlap_tokens` of `400`. - */ - export interface AutoChunkingStrategyRequestParam { - /** - * Always `auto`. - */ - type: 'auto'; - } - - export interface StaticChunkingStrategyRequestParam { - static: StaticChunkingStrategyRequestParam.Static; - - /** - * Always `static`. 
- */ - type: 'static'; - } - - export namespace StaticChunkingStrategyRequestParam { - export interface Static { - /** - * The number of tokens that overlap between chunks. The default value is `400`. - * - * Note that the overlap must not exceed half of `max_chunk_size_tokens`. - */ - chunk_overlap_tokens: number; - - /** - * The maximum number of tokens in each chunk. The default value is `800`. The - * minimum value is `100` and the maximum value is `4096`. - */ - max_chunk_size_tokens: number; - } - } + chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; } export interface FileListParams extends CursorPageParams { diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/beta/vector-stores/index.ts index 8fb787ccd..f70215f8f 100644 --- a/src/resources/beta/vector-stores/index.ts +++ b/src/resources/beta/vector-stores/index.ts @@ -1,6 +1,13 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyParam, VectorStore, VectorStoreDeleted, VectorStoreCreateParams, diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 343f25953..3c9aa707d 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -83,6 +83,73 @@ export class VectorStores extends APIResource { export class VectorStoresPage extends CursorPage {} +/** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ +export interface AutoFileChunkingStrategyParam { + /** + * Always `auto`. + */ + type: 'auto'; +} + +/** + * The strategy used to chunk the file. 
+ */ +export type FileChunkingStrategy = StaticFileChunkingStrategyObject | OtherFileChunkingStrategyObject; + +/** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. Only applicable if `file_ids` is non-empty. + */ +export type FileChunkingStrategyParam = AutoFileChunkingStrategyParam | StaticFileChunkingStrategyParam; + +/** + * This is returned when the chunking strategy is unknown. Typically, this is + * because the file was indexed before the `chunking_strategy` concept was + * introduced in the API. + */ +export interface OtherFileChunkingStrategyObject { + /** + * Always `other`. + */ + type: 'other'; +} + +export interface StaticFileChunkingStrategy { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; +} + +export interface StaticFileChunkingStrategyObject { + static: StaticFileChunkingStrategy; + + /** + * Always `static`. + */ + type: 'static'; +} + +export interface StaticFileChunkingStrategyParam { + static: StaticFileChunkingStrategy; + + /** + * Always `static`. + */ + type: 'static'; +} + /** * A vector store is a collection of processed files can be used by the * `file_search` tool. @@ -204,7 +271,7 @@ export interface VectorStoreCreateParams { * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. */ - chunking_strategy?: VectorStoreCreateParams.Auto | VectorStoreCreateParams.Static; + chunking_strategy?: FileChunkingStrategyParam; /** * The expiration policy for a vector store. 
@@ -233,43 +300,6 @@ export interface VectorStoreCreateParams { } export namespace VectorStoreCreateParams { - /** - * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of - * `800` and `chunk_overlap_tokens` of `400`. - */ - export interface Auto { - /** - * Always `auto`. - */ - type: 'auto'; - } - - export interface Static { - static: Static.Static; - - /** - * Always `static`. - */ - type: 'static'; - } - - export namespace Static { - export interface Static { - /** - * The number of tokens that overlap between chunks. The default value is `400`. - * - * Note that the overlap must not exceed half of `max_chunk_size_tokens`. - */ - chunk_overlap_tokens: number; - - /** - * The maximum number of tokens in each chunk. The default value is `800`. The - * minimum value is `100` and the maximum value is `4096`. - */ - max_chunk_size_tokens: number; - } - } - /** * The expiration policy for a vector store. */ @@ -342,6 +372,13 @@ export interface VectorStoreListParams extends CursorPageParams { } export namespace VectorStores { + export import AutoFileChunkingStrategyParam = VectorStoresAPI.AutoFileChunkingStrategyParam; + export import FileChunkingStrategy = VectorStoresAPI.FileChunkingStrategy; + export import FileChunkingStrategyParam = VectorStoresAPI.FileChunkingStrategyParam; + export import OtherFileChunkingStrategyObject = VectorStoresAPI.OtherFileChunkingStrategyObject; + export import StaticFileChunkingStrategy = VectorStoresAPI.StaticFileChunkingStrategy; + export import StaticFileChunkingStrategyObject = VectorStoresAPI.StaticFileChunkingStrategyObject; + export import StaticFileChunkingStrategyParam = VectorStoresAPI.StaticFileChunkingStrategyParam; export import VectorStore = VectorStoresAPI.VectorStore; export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; From c31ffc0fd6260c02a72e2bcc00f6803520a70648 Mon Sep 17 00:00:00 2001 From: 
"stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 21:11:23 +0000 Subject: [PATCH 233/533] fix(uploads): avoid making redundant memory copies (#1043) --- src/uploads.ts | 8 +++++--- tests/uploads.test.ts | 8 ++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/uploads.ts b/src/uploads.ts index a920351cd..8fd2154d4 100644 --- a/src/uploads.ts +++ b/src/uploads.ts @@ -107,8 +107,10 @@ export async function toFile( // If it's a promise, resolve it. value = await value; - // Use the file's options if there isn't one provided - options ??= isFileLike(value) ? { lastModified: value.lastModified, type: value.type } : {}; + // If we've been given a `File` we don't need to do anything + if (isFileLike(value)) { + return value; + } if (isResponseLike(value)) { const blob = await value.blob(); @@ -126,7 +128,7 @@ export async function toFile( name ||= getName(value) ?? 'unknown_file'; - if (!options.type) { + if (!options?.type) { const type = (bits[0] as any)?.type; if (typeof type === 'string') { options = { ...options, type }; diff --git a/tests/uploads.test.ts b/tests/uploads.test.ts index b40856e29..b64b80285 100644 --- a/tests/uploads.test.ts +++ b/tests/uploads.test.ts @@ -54,4 +54,12 @@ describe('toFile', () => { const file = await toFile(input); expect(file.name).toEqual('uploads.test.ts'); }); + + it('does not copy File objects', async () => { + const input = new File(['foo'], 'input.jsonl', { type: 'jsonl' }); + const file = await toFile(input); + expect(file).toBe(input); + expect(file.name).toEqual('input.jsonl'); + expect(file.type).toBe('jsonl'); + }); }); From 7e044282f0d4dc1c8e6fa2e653f8666f00a6df9e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Sep 2024 21:54:39 +0000 Subject: [PATCH 234/533] release: 4.58.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- 
scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5876e9cf1..5531ef5d2 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.57.3" + ".": "4.58.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c92fb6e61..6ebb5c0b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.58.0 (2024-09-05) + +Full Changelog: [v4.57.3...v4.58.0](https://github.com/openai/openai-node/compare/v4.57.3...v4.58.0) + +### Features + +* **vector store:** improve chunking strategy type names ([#1041](https://github.com/openai/openai-node/issues/1041)) ([471cec3](https://github.com/openai/openai-node/commit/471cec3228886253f07c13a362827a31e9ec7b63)) + + +### Bug Fixes + +* **uploads:** avoid making redundant memory copies ([#1043](https://github.com/openai/openai-node/issues/1043)) ([271297b](https://github.com/openai/openai-node/commit/271297bd32393d4c5663023adf82f8fb19dc3d25)) + ## 4.57.3 (2024-09-04) Full Changelog: [v4.57.2...v4.57.3](https://github.com/openai/openai-node/compare/v4.57.2...v4.57.3) diff --git a/README.md b/README.md index 0c89cbc7c..4d433c25b 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.57.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.58.0/mod.ts'; ``` diff --git a/package.json b/package.json index 5bb52f130..d3ed9fc2d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.57.3", + "version": "4.58.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 2aedee39e..23787ceab 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from 
https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.57.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.58.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 5292f3437..96374498a 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.57.3'; // x-release-please-version +export const VERSION = '4.58.0'; // x-release-please-version From 315ca6b1da171619a81922bc9271193dbb931820 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 09:40:12 +0000 Subject: [PATCH 235/533] chore(docs): update browser support information (#1045) --- README.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4d433c25b..23a8684dc 100644 --- a/README.md +++ b/README.md @@ -618,16 +618,22 @@ The following runtimes are supported: - Jest 28 or greater with the `"node"` environment (`"jsdom"` is not supported at this time). - Nitro v2.6 or greater. - Web browsers: disabled by default to avoid exposing your secret API credentials. Enable browser support by explicitly setting `dangerouslyAllowBrowser` to true'. -
- More explanation +
+ More explanation + ### Why is this dangerous? + Enabling the `dangerouslyAllowBrowser` option can be dangerous because it exposes your secret API credentials in the client-side code. Web browsers are inherently less secure than server environments, any user with access to the browser can potentially inspect, extract, and misuse these credentials. This could lead to unauthorized access using your credentials and potentially compromise sensitive data or functionality. + ### When might this not be dangerous? + In certain scenarios where enabling browser support might not pose significant risks: + - Internal Tools: If the application is used solely within a controlled internal environment where the users are trusted, the risk of credential exposure can be mitigated. - Public APIs with Limited Scope: If your API has very limited scope and the exposed credentials do not grant access to sensitive data or critical operations, the potential impact of exposure is reduced. - Development or debugging purpose: Enabling this feature temporarily might be acceptable, provided the credentials are short-lived, aren't also used in production environments, or are frequently rotated. +
Note that React Native is not supported at this time. From 25f1b599fad21f166bcf0f72ca373ed563c302b8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 6 Sep 2024 09:40:37 +0000 Subject: [PATCH 236/533] release: 4.58.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5531ef5d2..42af9b44f 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.58.0" + ".": "4.58.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ebb5c0b4..cca823075 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.58.1 (2024-09-06) + +Full Changelog: [v4.58.0...v4.58.1](https://github.com/openai/openai-node/compare/v4.58.0...v4.58.1) + +### Chores + +* **docs:** update browser support information ([#1045](https://github.com/openai/openai-node/issues/1045)) ([d326cc5](https://github.com/openai/openai-node/commit/d326cc54a77c450672fbf07d736cec80a9ba72fb)) + ## 4.58.0 (2024-09-05) Full Changelog: [v4.57.3...v4.58.0](https://github.com/openai/openai-node/compare/v4.57.3...v4.58.0) diff --git a/README.md b/README.md index 23a8684dc..0aaa4e984 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.58.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.58.1/mod.ts'; ``` diff --git a/package.json b/package.json index d3ed9fc2d..f4310009e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.58.0", + "version": "4.58.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno 
b/scripts/build-deno index 23787ceab..314ef998e 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.58.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.58.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 96374498a..ee6349863 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.58.0'; // x-release-please-version +export const VERSION = '4.58.1'; // x-release-please-version From b8dc229ec68a7af9309a2b7bd4241f15901fc35d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 15:30:11 +0000 Subject: [PATCH 237/533] fix(errors): pass message through to APIConnectionError (#1050) --- src/error.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/error.ts b/src/error.ts index 83ddbfafa..87eeea046 100644 --- a/src/error.ts +++ b/src/error.ts @@ -61,7 +61,7 @@ export class APIError extends OpenAIError { headers: Headers | undefined, ) { if (!status) { - return new APIConnectionError({ cause: castToError(errorResponse) }); + return new APIConnectionError({ message, cause: castToError(errorResponse) }); } const error = (errorResponse as Record)?.['error']; @@ -113,7 +113,7 @@ export class APIUserAbortError extends APIError { export class APIConnectionError extends APIError { override readonly status: undefined = undefined; - constructor({ message, cause }: { message?: string; cause?: Error | undefined }) { + constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) { super(undefined, undefined, message || 'Connection error.', undefined); // in some environments the 'cause' property is already declared // @ts-ignore From 36dcce01992183fead79a74949e1432c87f942f5 Mon 
Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 19:36:54 +0000 Subject: [PATCH 238/533] chore: better object fallback behaviour for casting errors (#1053) --- src/core.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/core.ts b/src/core.ts index 2d91751d7..a4bb87a32 100644 --- a/src/core.ts +++ b/src/core.ts @@ -994,6 +994,11 @@ const validatePositiveInteger = (name: string, n: unknown): number => { export const castToError = (err: any): Error => { if (err instanceof Error) return err; + if (typeof err === 'object' && err !== null) { + try { + return new Error(JSON.stringify(err)); + } catch {} + } return new Error(err); }; From e7d1fce78f733997808cb3f25991cadb957ca57d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 19:37:22 +0000 Subject: [PATCH 239/533] release: 4.58.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 42af9b44f..c740afe14 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.58.1" + ".": "4.58.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index cca823075..468be63b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.58.2 (2024-09-09) + +Full Changelog: [v4.58.1...v4.58.2](https://github.com/openai/openai-node/compare/v4.58.1...v4.58.2) + +### Bug Fixes + +* **errors:** pass message through to APIConnectionError ([#1050](https://github.com/openai/openai-node/issues/1050)) ([5a34316](https://github.com/openai/openai-node/commit/5a3431672e200a6bc161d39873e914434557801e)) + + +### Chores + +* better object fallback behaviour for casting errors 
([#1053](https://github.com/openai/openai-node/issues/1053)) ([b7d4619](https://github.com/openai/openai-node/commit/b7d46190d2bb775145a9a3de6408c38abacfa055)) + ## 4.58.1 (2024-09-06) Full Changelog: [v4.58.0...v4.58.1](https://github.com/openai/openai-node/compare/v4.58.0...v4.58.1) diff --git a/README.md b/README.md index 0aaa4e984..8415ed186 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.58.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.58.2/mod.ts'; ``` diff --git a/package.json b/package.json index f4310009e..b284c38a6 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.58.1", + "version": "4.58.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 314ef998e..ef3867dc2 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.58.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.58.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index ee6349863..f650f76c3 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.58.1'; // x-release-please-version +export const VERSION = '4.58.2'; // x-release-please-version From 04b88383c81cad76847b3d8fe8cbed384221e5cd Mon Sep 17 00:00:00 2001 From: Angelos Petropoulos Date: Wed, 11 Sep 2024 05:11:32 -0500 Subject: [PATCH 240/533] docs(azure): example for custom base URL (#1055) Co-authored-by: Angelos Petropoulos --- src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index c0e527d25..36064286d 100644 --- 
a/src/index.ts +++ b/src/index.ts @@ -381,7 +381,7 @@ export class AzureOpenAI extends OpenAI { * @param {string | undefined} [opts.apiKey=process.env['AZURE_OPENAI_API_KEY'] ?? undefined] * @param {string | undefined} opts.deployment - A model deployment, if given, sets the base client URL to include `/deployments/{deployment}`. * @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null] - * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL']] - Sets the base URL for the API. + * @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL']] - Sets the base URL for the API, e.g. `https://example-resource.azure.openai.com/openai/`. * @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out. * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections. * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation. From fc549fc033ee3dc5fbb678ea8969a2160336746e Mon Sep 17 00:00:00 2001 From: Scott Addie <10702007+scottaddie@users.noreply.github.com> Date: Wed, 11 Sep 2024 05:12:33 -0500 Subject: [PATCH 241/533] docs(azure): remove locale from docs link (#1054) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8415ed186..31a7776d6 100644 --- a/README.md +++ b/README.md @@ -363,7 +363,7 @@ Error codes are as followed: ## Microsoft Azure OpenAI -To use this library with [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview), use the `AzureOpenAI` +To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` class instead of the `OpenAI` class. 
> [!IMPORTANT] From d9dee78c7052649131b3897243366a9b58d9616e Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 11 Sep 2024 11:14:17 +0100 Subject: [PATCH 242/533] feat(structured outputs): support accessing raw responses (#1058) --- src/resources/beta/chat/completions.ts | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index 96c4118bf..113de4026 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -59,21 +59,21 @@ export interface ParsedChatCompletion extends ChatCompletion { export type ChatCompletionParseParams = ChatCompletionCreateParamsNonStreaming; export class Completions extends APIResource { - async parse>( + parse>( body: Params, options?: Core.RequestOptions, - ): Promise> { + ): Core.APIPromise> { validateInputTools(body.tools); - const completion = await this._client.chat.completions.create(body, { - ...options, - headers: { - ...options?.headers, - 'X-Stainless-Helper-Method': 'beta.chat.completions.parse', - }, - }); - - return parseChatCompletion(completion, body); + return this._client.chat.completions + .create(body, { + ...options, + headers: { + ...options?.headers, + 'X-Stainless-Helper-Method': 'beta.chat.completions.parse', + }, + }) + ._thenUnwrap((completion) => parseChatCompletion(completion, body)); } /** From 8958d9711b66e0cbacf816b7b4538217016beb0b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 10:14:38 +0000 Subject: [PATCH 243/533] release: 4.59.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c740afe14..99d650398 100644 --- 
a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.58.2" + ".": "4.59.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 468be63b2..a245938c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.59.0 (2024-09-11) + +Full Changelog: [v4.58.2...v4.59.0](https://github.com/openai/openai-node/compare/v4.58.2...v4.59.0) + +### Features + +* **structured outputs:** support accessing raw responses ([#1058](https://github.com/openai/openai-node/issues/1058)) ([af17697](https://github.com/openai/openai-node/commit/af176979894ee95a51e09abc239a8fd3a639dcde)) + + +### Documentation + +* **azure:** example for custom base URL ([#1055](https://github.com/openai/openai-node/issues/1055)) ([20defc8](https://github.com/openai/openai-node/commit/20defc80801e1f1f489a07bd1264be71c56c586f)) +* **azure:** remove locale from docs link ([#1054](https://github.com/openai/openai-node/issues/1054)) ([f9b7eac](https://github.com/openai/openai-node/commit/f9b7eac58020cff0e367a15b9a2ca4e7c95cbb89)) + ## 4.58.2 (2024-09-09) Full Changelog: [v4.58.1...v4.58.2](https://github.com/openai/openai-node/compare/v4.58.1...v4.58.2) diff --git a/README.md b/README.md index 31a7776d6..11e3216f1 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.58.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.59.0/mod.ts'; ``` diff --git a/package.json b/package.json index b284c38a6..c3147af52 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.58.2", + "version": "4.59.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index ef3867dc2..32ac4ce7f 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from 
https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.58.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.59.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index f650f76c3..22b9a14fc 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.58.2'; // x-release-please-version +export const VERSION = '4.59.0'; // x-release-please-version From dcb1bc6098964f55e6ce24e361c681093679abf0 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 12 Sep 2024 16:55:10 +0000 Subject: [PATCH 244/533] feat(api): add o1 models (#1061) See https://platform.openai.com/docs/guides/reasoning for details. --- .stats.yml | 2 +- src/resources/beta/assistants.ts | 36 +++++++++++--------- src/resources/beta/threads/runs/runs.ts | 12 +++---- src/resources/beta/threads/threads.ts | 12 +++---- src/resources/chat/chat.ts | 7 +++- src/resources/chat/completions.ts | 30 ++++++++++------ src/resources/completions.ts | 17 +++++++++ src/resources/fine-tuning/jobs/jobs.ts | 2 +- tests/api-resources/chat/completions.test.ts | 1 + 9 files changed, 77 insertions(+), 42 deletions(-) diff --git a/.stats.yml b/.stats.yml index 903c15996..de3167f3a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-85a85e0c08de456441431c0ae4e9c078cc8f9748c29430b9a9058340db6389ee.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-501122aa32adaa2abb3d4487880ab9cdf2141addce2e6c3d1bd9bb6b44c318a8.yml diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 0dbb076d5..410d520b0 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -151,11 +151,11 @@ export interface Assistant { * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
* * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which guarantees the model will match your supplied JSON schema. Learn - * more in the + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the * message the model generates is valid JSON. * * **Important:** when using JSON mode, you **must** also instruct the model to @@ -665,7 +665,8 @@ export namespace FileSearchTool { max_num_results?: number; /** - * The ranking options for the file search. + * The ranking options for the file search. If not specified, the file search tool + * will use the `auto` ranker and a score_threshold of 0. * * See the * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) @@ -676,7 +677,8 @@ export namespace FileSearchTool { export namespace FileSearch { /** - * The ranking options for the file search. + * The ranking options for the file search. If not specified, the file search tool + * will use the `auto` ranker and a score_threshold of 0. * * See the * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) @@ -684,16 +686,16 @@ export namespace FileSearchTool { */ export interface RankingOptions { /** - * The ranker to use for the file search. If not specified will use the `auto` - * ranker. + * The score threshold for the file search. All values must be a floating point + * number between 0 and 1. */ - ranker?: 'auto' | 'default_2024_08_21'; + score_threshold: number; /** - * The score threshold for the file search. All values must be a floating point - * number between 0 and 1. 
+ * The ranker to use for the file search. If not specified will use the `auto` + * ranker. */ - score_threshold?: number; + ranker?: 'auto' | 'default_2024_08_21'; } } } @@ -1125,11 +1127,11 @@ export interface AssistantCreateParams { * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which guarantees the model will match your supplied JSON schema. Learn - * more in the + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the * message the model generates is valid JSON. * * **Important:** when using JSON mode, you **must** also instruct the model to @@ -1283,11 +1285,11 @@ export interface AssistantUpdateParams { * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which guarantees the model will match your supplied JSON schema. Learn - * more in the + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the * message the model generates is valid JSON. 
* * **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index fe3a278e9..b48edd5b1 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -429,11 +429,11 @@ export interface Run { * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which guarantees the model will match your supplied JSON schema. Learn - * more in the + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the * message the model generates is valid JSON. * * **Important:** when using JSON mode, you **must** also instruct the model to @@ -709,11 +709,11 @@ export interface RunCreateParamsBase { * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which guarantees the model will match your supplied JSON schema. Learn - * more in the + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the * message the model generates is valid JSON. 
* * **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index c49618f0c..be959eb30 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -126,11 +126,11 @@ export class Threads extends APIResource { * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which guarantees the model will match your supplied JSON schema. Learn - * more in the + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the * message the model generates is valid JSON. * * **Important:** when using JSON mode, you **must** also instruct the model to @@ -522,11 +522,11 @@ export interface ThreadCreateAndRunParamsBase { * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which guarantees the model will match your supplied JSON schema. Learn - * more in the + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the * message the model generates is valid JSON. 
* * **Important:** when using JSON mode, you **must** also instruct the model to diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 684b1307a..1a758fbb5 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -9,9 +9,14 @@ export class Chat extends APIResource { } export type ChatModel = + | 'o1-preview' + | 'o1-preview-2024-09-12' + | 'o1-mini' + | 'o1-mini-2024-09-12' | 'gpt-4o' - | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' + | 'gpt-4o-2024-05-13' + | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 764bdb129..f426ce36f 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -788,14 +788,21 @@ export interface ChatCompletionCreateParamsBase { */ logprobs?: boolean | null; + /** + * An upper bound for the number of tokens that can be generated for a completion, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_completion_tokens?: number | null; + /** * The maximum number of [tokens](/tokenizer) that can be generated in the chat - * completion. + * completion. This value can be used to control + * [costs](https://openai.com/api/pricing/) for text generated via API. * - * The total length of input tokens and generated tokens is limited by the model's - * context length. - * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - * for counting tokens. + * This value is now deprecated in favor of `max_completion_tokens`, and is not + * compatible with + * [o1 series models](https://platform.openai.com/docs/guides/reasoning). */ max_tokens?: number | null; @@ -830,11 +837,11 @@ export interface ChatCompletionCreateParamsBase { * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. 
* * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which guarantees the model will match your supplied JSON schema. Learn - * more in the + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the * message the model generates is valid JSON. * * **Important:** when using JSON mode, you **must** also instruct the model to @@ -863,8 +870,11 @@ export interface ChatCompletionCreateParamsBase { * Specifies the latency tier to use for processing the request. This parameter is * relevant for customers subscribed to the scale tier service: * - * - If set to 'auto', the system will utilize scale tier credits until they are - * exhausted. + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. * - If set to 'default', the request will be processed using the default service * tier with a lower uptime SLA and no latency guarentee. * - When not set, the default behavior is 'auto'. diff --git a/src/resources/completions.ts b/src/resources/completions.ts index a6b527995..152496766 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -120,6 +120,23 @@ export interface CompletionUsage { * Total number of tokens used in the request (prompt + completion). */ total_tokens: number; + + /** + * Breakdown of tokens used in a completion. 
+ */ + completion_tokens_details?: CompletionUsage.CompletionTokensDetails; +} + +export namespace CompletionUsage { + /** + * Breakdown of tokens used in a completion. + */ + export interface CompletionTokensDetails { + /** + * Tokens generated by the model for reasoning. + */ + reasoning_tokens?: number; + } } export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index aeb646279..54b5c4e6a 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -340,7 +340,7 @@ export interface JobCreateParams { seed?: number | null; /** - * A string of up to 18 characters that will be added to your fine-tuned model + * A string of up to 64 characters that will be added to your fine-tuned model * name. * * For example, a `suffix` of "custom-model-name" would produce a model name like diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 2179c52c3..692b953f2 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -32,6 +32,7 @@ describe('resource completions', () => { functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], logit_bias: { foo: 0 }, logprobs: true, + max_completion_tokens: 0, max_tokens: 0, n: 1, parallel_tool_calls: true, From 720a84336debce1beedeaedfc15a94ecd3afae0e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Sep 2024 16:55:32 +0000 Subject: [PATCH 245/533] release: 4.60.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 
99d650398..6e417e5c0 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.59.0" + ".": "4.60.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index a245938c6..344fb67b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.60.0 (2024-09-12) + +Full Changelog: [v4.59.0...v4.60.0](https://github.com/openai/openai-node/compare/v4.59.0...v4.60.0) + +### Features + +* **api:** add o1 models ([#1061](https://github.com/openai/openai-node/issues/1061)) ([224cc04](https://github.com/openai/openai-node/commit/224cc045200cd1ce1517b4376c505de9b9a74ccc)) + ## 4.59.0 (2024-09-11) Full Changelog: [v4.58.2...v4.59.0](https://github.com/openai/openai-node/compare/v4.58.2...v4.59.0) diff --git a/README.md b/README.md index 11e3216f1..8ae42faaa 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.59.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.60.0/mod.ts'; ``` diff --git a/package.json b/package.json index c3147af52..be86f4c0d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.59.0", + "version": "4.60.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 32ac4ce7f..db3ec6eef 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.59.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.60.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 22b9a14fc..30c6fccf2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.59.0'; // x-release-please-version 
+export const VERSION = '4.60.0'; // x-release-please-version From 908ae61f3c228e0c285f736aab226b1e6db5247f Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 13 Sep 2024 13:53:28 +0100 Subject: [PATCH 246/533] fix(zod): correctly add $ref definitions for transformed schemas (#1065) --- src/_vendor/zod-to-json-schema/parseDef.ts | 11 +- .../zod-to-json-schema/parsers/effects.ts | 8 +- tests/lib/__snapshots__/parser.test.ts.snap | 31 +++++ tests/lib/parser.test.ts | 114 ++++++++++++++++++ 4 files changed, 159 insertions(+), 5 deletions(-) diff --git a/src/_vendor/zod-to-json-schema/parseDef.ts b/src/_vendor/zod-to-json-schema/parseDef.ts index a8c8e7063..8af5ce4be 100644 --- a/src/_vendor/zod-to-json-schema/parseDef.ts +++ b/src/_vendor/zod-to-json-schema/parseDef.ts @@ -99,7 +99,7 @@ export function parseDef( refs.seen.set(def, newItem); - const jsonSchema = selectParser(def, (def as any).typeName, refs); + const jsonSchema = selectParser(def, (def as any).typeName, refs, forceResolution); if (jsonSchema) { addMeta(def, refs, jsonSchema); @@ -166,7 +166,12 @@ const getRelativePath = (pathA: string[], pathB: string[]) => { return [(pathA.length - i).toString(), ...pathB.slice(i)].join('/'); }; -const selectParser = (def: any, typeName: ZodFirstPartyTypeKind, refs: Refs): JsonSchema7Type | undefined => { +const selectParser = ( + def: any, + typeName: ZodFirstPartyTypeKind, + refs: Refs, + forceResolution: boolean, +): JsonSchema7Type | undefined => { switch (typeName) { case ZodFirstPartyTypeKind.ZodString: return parseStringDef(def, refs); @@ -217,7 +222,7 @@ const selectParser = (def: any, typeName: ZodFirstPartyTypeKind, refs: Refs): Js case ZodFirstPartyTypeKind.ZodNever: return parseNeverDef(); case ZodFirstPartyTypeKind.ZodEffects: - return parseEffectsDef(def, refs); + return parseEffectsDef(def, refs, forceResolution); case ZodFirstPartyTypeKind.ZodAny: return parseAnyDef(); case ZodFirstPartyTypeKind.ZodUnknown: diff --git 
a/src/_vendor/zod-to-json-schema/parsers/effects.ts b/src/_vendor/zod-to-json-schema/parsers/effects.ts index 23d368987..b010d5c47 100644 --- a/src/_vendor/zod-to-json-schema/parsers/effects.ts +++ b/src/_vendor/zod-to-json-schema/parsers/effects.ts @@ -2,6 +2,10 @@ import { ZodEffectsDef } from 'zod'; import { JsonSchema7Type, parseDef } from '../parseDef'; import { Refs } from '../Refs'; -export function parseEffectsDef(_def: ZodEffectsDef, refs: Refs): JsonSchema7Type | undefined { - return refs.effectStrategy === 'input' ? parseDef(_def.schema._def, refs) : {}; +export function parseEffectsDef( + _def: ZodEffectsDef, + refs: Refs, + forceResolution: boolean, +): JsonSchema7Type | undefined { + return refs.effectStrategy === 'input' ? parseDef(_def.schema._def, refs, forceResolution) : {}; } diff --git a/tests/lib/__snapshots__/parser.test.ts.snap b/tests/lib/__snapshots__/parser.test.ts.snap index 12e737f5c..11d68ab4e 100644 --- a/tests/lib/__snapshots__/parser.test.ts.snap +++ b/tests/lib/__snapshots__/parser.test.ts.snap @@ -112,6 +112,37 @@ exports[`.parse() zod recursive schema extraction 2`] = ` " `; +exports[`.parse() zod ref schemas with \`.transform()\` 2`] = ` +"{ + "id": "chatcmpl-A6zyLEtubMlUvGplOmr92S0mK0kiG", + "object": "chat.completion", + "created": 1726231553, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\\"first\\":{\\"baz\\":true},\\"second\\":{\\"baz\\":false}}", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 167, + "completion_tokens": 13, + "total_tokens": 180, + "completion_tokens_details": { + "reasoning_tokens": 0 + } + }, + "system_fingerprint": "fp_143bb8492c" +} +" +`; + exports[`.parse() zod top-level recursive schemas 1`] = ` "{ "id": "chatcmpl-9uLhw79ArBF4KsQQOlsoE68m6vh6v", diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index cbcc2f186..b220e92d3 100644 --- 
a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -951,5 +951,119 @@ describe('.parse()', () => { } `); }); + + test('ref schemas with `.transform()`', async () => { + const Inner = z.object({ + baz: z.boolean().transform((v) => v ?? true), + }); + + const Outer = z.object({ + first: Inner, + second: Inner, + }); + + expect(zodResponseFormat(Outer, 'data').json_schema.schema).toMatchInlineSnapshot(` + { + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": false, + "definitions": { + "data": { + "additionalProperties": false, + "properties": { + "first": { + "additionalProperties": false, + "properties": { + "baz": { + "type": "boolean", + }, + }, + "required": [ + "baz", + ], + "type": "object", + }, + "second": { + "$ref": "#/definitions/data_properties_first", + }, + }, + "required": [ + "first", + "second", + ], + "type": "object", + }, + "data_properties_first": { + "additionalProperties": false, + "properties": { + "baz": { + "$ref": "#/definitions/data_properties_first_properties_baz", + }, + }, + "required": [ + "baz", + ], + "type": "object", + }, + "data_properties_first_properties_baz": { + "type": "boolean", + }, + }, + "properties": { + "first": { + "additionalProperties": false, + "properties": { + "baz": { + "type": "boolean", + }, + }, + "required": [ + "baz", + ], + "type": "object", + }, + "second": { + "$ref": "#/definitions/data_properties_first", + }, + }, + "required": [ + "first", + "second", + ], + "type": "object", + } + `); + + const completion = await makeSnapshotRequest( + (openai) => + openai.beta.chat.completions.parse({ + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'user', + content: 'can you generate fake data matching the given response format?', + }, + ], + response_format: zodResponseFormat(Outer, 'fakeData'), + }), + 2, + ); + + expect(completion.choices[0]?.message).toMatchInlineSnapshot(` + { + "content": "{"first":{"baz":true},"second":{"baz":false}}", + "parsed": { + 
"first": { + "baz": true, + }, + "second": { + "baz": false, + }, + }, + "refusal": null, + "role": "assistant", + "tool_calls": [], + } + `); + }); }); }); From 58eb50c781260290b3ef9d664ee3d5153e5d9479 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Sep 2024 12:53:50 +0000 Subject: [PATCH 247/533] release: 4.60.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6e417e5c0..0a2d02022 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.60.0" + ".": "4.60.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 344fb67b5..7bbcaaa2c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.60.1 (2024-09-13) + +Full Changelog: [v4.60.0...v4.60.1](https://github.com/openai/openai-node/compare/v4.60.0...v4.60.1) + +### Bug Fixes + +* **zod:** correctly add $ref definitions for transformed schemas ([#1065](https://github.com/openai/openai-node/issues/1065)) ([9b93b24](https://github.com/openai/openai-node/commit/9b93b24b8ae267fe403fb9cd4876d9772f40878b)) + ## 4.60.0 (2024-09-12) Full Changelog: [v4.59.0...v4.60.0](https://github.com/openai/openai-node/compare/v4.59.0...v4.60.0) diff --git a/README.md b/README.md index 8ae42faaa..35a958759 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.60.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.60.1/mod.ts'; ``` diff --git a/package.json b/package.json index be86f4c0d..2f684d212 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.60.0", + "version": "4.60.1", "description": "The official 
TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index db3ec6eef..9ad4399b7 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.60.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.60.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 30c6fccf2..7070d859c 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.60.0'; // x-release-please-version +export const VERSION = '4.60.1'; // x-release-please-version From 90c2da5792f1e93dae162bba54be8a9c20704b1e Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 13 Sep 2024 16:15:28 +0100 Subject: [PATCH 248/533] fix(examples): handle usage chunk in tool call streaming (#1068) --------- Co-authored-by: Jacob Zimmerman --- examples/tool-calls-stream.ts | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/examples/tool-calls-stream.ts b/examples/tool-calls-stream.ts index 924e6b7cf..687ea86fd 100755 --- a/examples/tool-calls-stream.ts +++ b/examples/tool-calls-stream.ts @@ -184,7 +184,13 @@ function messageReducer(previous: ChatCompletionMessage, item: ChatCompletionChu } return acc; }; - return reduce(previous, item.choices[0]!.delta) as ChatCompletionMessage; + + const choice = item.choices[0]; + if (!choice) { + // chunk contains information about usage and token counts + return previous; + } + return reduce(previous, choice.delta) as ChatCompletionMessage; } function lineRewriter() { From 1fa551337a6d33df39f12dd973e80726c8e727a8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Sep 2024 15:29:42 +0000 Subject: [PATCH 249/533] docs: update CONTRIBUTING.md (#1071) 
--- CONTRIBUTING.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9e8f669a7..62b48d828 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,13 +14,13 @@ This will install all the required dependencies and build output files to `dist/ ## Modifying/Adding code -Most of the SDK is generated code, and any modified code will be overridden on the next generation. The -`src/lib/` and `examples/` directories are exceptions and will never be overridden. +Most of the SDK is generated code. Modifications to code will be persisted between generations, but may +result in merge conflicts between manual patches and changes from the generator. The generator will never +modify the contents of the `src/lib/` and `examples/` directories. ## Adding and running examples -All files in the `examples/` directory are not modified by the Stainless generator and can be freely edited or -added to. +All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. 
```bash // add an example to examples/.ts From bf72a084ec67549ff4414c94c6879cc6550b7aa4 Mon Sep 17 00:00:00 2001 From: Jacob Zimmerman Date: Sat, 7 Sep 2024 19:02:36 -0400 Subject: [PATCH 250/533] fix(client): partial parsing update to handle strings small testing additions lint --- package.json | 1 + src/_vendor/partial-json-parser/README.md | 2 +- src/_vendor/partial-json-parser/parser.ts | 469 +++++++++--------- .../partial-json-parsing.test.ts | 58 +++ yarn.lock | 12 + 5 files changed, 298 insertions(+), 244 deletions(-) create mode 100644 tests/_vendor/partial-json-parser/partial-json-parsing.test.ts diff --git a/package.json b/package.json index 2f684d212..934e1e722 100644 --- a/package.json +++ b/package.json @@ -43,6 +43,7 @@ "eslint": "^8.49.0", "eslint-plugin-prettier": "^5.0.1", "eslint-plugin-unused-imports": "^3.0.0", + "fast-check": "^3.22.0", "jest": "^29.4.0", "prettier": "^3.0.0", "prettier-2": "npm:prettier@^2", diff --git a/src/_vendor/partial-json-parser/README.md b/src/_vendor/partial-json-parser/README.md index bc6ea4e3d..d4e1c85d6 100644 --- a/src/_vendor/partial-json-parser/README.md +++ b/src/_vendor/partial-json-parser/README.md @@ -1,3 +1,3 @@ # Partial JSON Parser -Vendored from https://www.npmjs.com/package/partial-json-parser and updated to use TypeScript. 
+Vendored from https://www.npmjs.com/package/partial-json with some modifications diff --git a/src/_vendor/partial-json-parser/parser.ts b/src/_vendor/partial-json-parser/parser.ts index 9470c462f..5ee62b76b 100644 --- a/src/_vendor/partial-json-parser/parser.ts +++ b/src/_vendor/partial-json-parser/parser.ts @@ -1,264 +1,247 @@ -type Token = { - type: string; - value: string; +const STR = 0b000000001; +const NUM = 0b000000010; +const ARR = 0b000000100; +const OBJ = 0b000001000; +const NULL = 0b000010000; +const BOOL = 0b000100000; +const NAN = 0b001000000; +const INFINITY = 0b010000000; +const MINUS_INFINITY = 0b100000000; + +const INF = INFINITY | MINUS_INFINITY; +const SPECIAL = NULL | BOOL | INF | NAN; +const ATOM = STR | NUM | SPECIAL; +const COLLECTION = ARR | OBJ; +const ALL = ATOM | COLLECTION; + +const Allow = { + STR, + NUM, + ARR, + OBJ, + NULL, + BOOL, + NAN, + INFINITY, + MINUS_INFINITY, + INF, + SPECIAL, + ATOM, + COLLECTION, + ALL, }; -const tokenize = (input: string): Token[] => { - let current = 0; - let tokens: Token[] = []; - - while (current < input.length) { - let char = input[current]; - - if (char === '\\') { - current++; - continue; - } - - if (char === '{') { - tokens.push({ - type: 'brace', - value: '{', - }); - - current++; - continue; - } - - if (char === '}') { - tokens.push({ - type: 'brace', - value: '}', - }); - - current++; - continue; - } - - if (char === '[') { - tokens.push({ - type: 'paren', - value: '[', - }); - - current++; - continue; - } - - if (char === ']') { - tokens.push({ - type: 'paren', - value: ']', - }); - - current++; - continue; - } - - if (char === ':') { - tokens.push({ - type: 'separator', - value: ':', - }); - - current++; - continue; +// The JSON string segment was unable to be parsed completely +class PartialJSON extends Error {} + +class MalformedJSON extends Error {} + +/** + * Parse incomplete JSON + * @param {string} jsonString Partial JSON to be parsed + * @param {number} allowPartial Specify what types 
are allowed to be partial, see {@link Allow} for details + * @returns The parsed JSON + * @throws {PartialJSON} If the JSON is incomplete (related to the `allow` parameter) + * @throws {MalformedJSON} If the JSON is malformed + */ +function parseJSON(jsonString: string, allowPartial: number = Allow.ALL): any { + if (typeof jsonString !== 'string') { + throw new TypeError(`expecting str, got ${typeof jsonString}`); + } + if (!jsonString.trim()) { + throw new Error(`${jsonString} is empty`); + } + return _parseJSON(jsonString.trim(), allowPartial); +} + +const _parseJSON = (jsonString: string, allow: number) => { + const length = jsonString.length; + let index = 0; + + const markPartialJSON = (msg: string) => { + throw new PartialJSON(`${msg} at position ${index}`); + }; + + const throwMalformedError = (msg: string) => { + throw new MalformedJSON(`${msg} at position ${index}`); + }; + + const parseAny: () => any = () => { + skipBlank(); + if (index >= length) markPartialJSON('Unexpected end of input'); + if (jsonString[index] === '"') return parseStr(); + if (jsonString[index] === '{') return parseObj(); + if (jsonString[index] === '[') return parseArr(); + if ( + jsonString.substring(index, index + 4) === 'null' || + (Allow.NULL & allow && length - index < 4 && 'null'.startsWith(jsonString.substring(index))) + ) { + index += 4; + return null; + } + if ( + jsonString.substring(index, index + 4) === 'true' || + (Allow.BOOL & allow && length - index < 4 && 'true'.startsWith(jsonString.substring(index))) + ) { + index += 4; + return true; + } + if ( + jsonString.substring(index, index + 5) === 'false' || + (Allow.BOOL & allow && length - index < 5 && 'false'.startsWith(jsonString.substring(index))) + ) { + index += 5; + return false; + } + if ( + jsonString.substring(index, index + 8) === 'Infinity' || + (Allow.INFINITY & allow && length - index < 8 && 'Infinity'.startsWith(jsonString.substring(index))) + ) { + index += 8; + return Infinity; + } + if ( + 
jsonString.substring(index, index + 9) === '-Infinity' || + (Allow.MINUS_INFINITY & allow && + 1 < length - index && + length - index < 9 && + '-Infinity'.startsWith(jsonString.substring(index))) + ) { + index += 9; + return -Infinity; + } + if ( + jsonString.substring(index, index + 3) === 'NaN' || + (Allow.NAN & allow && length - index < 3 && 'NaN'.startsWith(jsonString.substring(index))) + ) { + index += 3; + return NaN; + } + return parseNum(); + }; + + const parseStr: () => string = () => { + const start = index; + let escape = false; + index++; // skip initial quote + while (index < length && (jsonString[index] !== '"' || (escape && jsonString[index - 1] === '\\'))) { + escape = jsonString[index] === '\\' ? !escape : false; + index++; + } + if (jsonString.charAt(index) == '"') { + try { + return JSON.parse(jsonString.substring(start, ++index - Number(escape))); + } catch (e) { + throwMalformedError(String(e)); } - - if (char === ',') { - tokens.push({ - type: 'delimiter', - value: ',', - }); - - current++; - continue; + } else if (Allow.STR & allow) { + try { + return JSON.parse(jsonString.substring(start, index - Number(escape)) + '"'); + } catch (e) { + // SyntaxError: Invalid escape sequence + return JSON.parse(jsonString.substring(start, jsonString.lastIndexOf('\\')) + '"'); } - - if (char === '"') { - let value = ''; - let danglingQuote = false; - - char = input[++current]; - - while (char !== '"') { - if (current === input.length) { - danglingQuote = true; - break; - } - - if (char === '\\') { - current++; - if (current === input.length) { - danglingQuote = true; - break; - } - value += char + input[current]; - char = input[++current]; - } else { - value += char; - char = input[++current]; - } - } - - char = input[++current]; - - if (!danglingQuote) { - tokens.push({ - type: 'string', - value, - }); + } + markPartialJSON('Unterminated string literal'); + }; + + const parseObj = () => { + index++; // skip initial brace + skipBlank(); + const obj: Record 
= {}; + try { + while (jsonString[index] !== '}') { + skipBlank(); + if (index >= length && Allow.OBJ & allow) return obj; + const key = parseStr(); + skipBlank(); + index++; // skip colon + try { + const value = parseAny(); + Object.defineProperty(obj, key, { value, writable: true, enumerable: true, configurable: true }); + } catch (e) { + if (Allow.OBJ & allow) return obj; + else throw e; } - continue; - } - - let WHITESPACE = /\s/; - if (char && WHITESPACE.test(char)) { - current++; - continue; + skipBlank(); + if (jsonString[index] === ',') index++; // skip comma } - - let NUMBERS = /[0-9]/; - if ((char && NUMBERS.test(char)) || char === '-' || char === '.') { - let value = ''; - - if (char === '-') { - value += char; - char = input[++current]; - } - - while ((char && NUMBERS.test(char)) || char === '.') { - value += char; - char = input[++current]; + } catch (e) { + if (Allow.OBJ & allow) return obj; + else markPartialJSON("Expected '}' at end of object"); + } + index++; // skip final brace + return obj; + }; + + const parseArr = () => { + index++; // skip initial bracket + const arr = []; + try { + while (jsonString[index] !== ']') { + arr.push(parseAny()); + skipBlank(); + if (jsonString[index] === ',') { + index++; // skip comma } - - tokens.push({ - type: 'number', - value, - }); - continue; } - - let LETTERS = /[a-z]/i; - if (char && LETTERS.test(char)) { - let value = ''; - - while (char && LETTERS.test(char)) { - if (current === input.length) { - break; - } - value += char; - char = input[++current]; - } - - if (value == 'true' || value == 'false' || value === 'null') { - tokens.push({ - type: 'name', - value, - }); - } else { - // unknown token, e.g. 
`nul` which isn't quite `null` - current++; - continue; - } - continue; + } catch (e) { + if (Allow.ARR & allow) { + return arr; } - - current++; + markPartialJSON("Expected ']' at end of array"); } - - return tokens; - }, - strip = (tokens: Token[]): Token[] => { - if (tokens.length === 0) { - return tokens; + index++; // skip final bracket + return arr; + }; + + const parseNum = () => { + if (index === 0) { + if (jsonString === '-' && Allow.NUM & allow) markPartialJSON("Not sure what '-' is"); + try { + return JSON.parse(jsonString); + } catch (e) { + if (Allow.NUM & allow) { + try { + if ('.' === jsonString[jsonString.length - 1]) + return JSON.parse(jsonString.substring(0, jsonString.lastIndexOf('.'))); + return JSON.parse(jsonString.substring(0, jsonString.lastIndexOf('e'))); + } catch (e) {} + } + throwMalformedError(String(e)); + } } - let lastToken = tokens[tokens.length - 1]!; + const start = index; - switch (lastToken.type) { - case 'separator': - tokens = tokens.slice(0, tokens.length - 1); - return strip(tokens); - break; - case 'number': - let lastCharacterOfLastToken = lastToken.value[lastToken.value.length - 1]; - if (lastCharacterOfLastToken === '.' 
|| lastCharacterOfLastToken === '-') { - tokens = tokens.slice(0, tokens.length - 1); - return strip(tokens); - } - case 'string': - let tokenBeforeTheLastToken = tokens[tokens.length - 2]; - if (tokenBeforeTheLastToken?.type === 'delimiter') { - tokens = tokens.slice(0, tokens.length - 1); - return strip(tokens); - } else if (tokenBeforeTheLastToken?.type === 'brace' && tokenBeforeTheLastToken.value === '{') { - tokens = tokens.slice(0, tokens.length - 1); - return strip(tokens); - } - break; - case 'delimiter': - tokens = tokens.slice(0, tokens.length - 1); - return strip(tokens); - break; - } + if (jsonString[index] === '-') index++; + while (jsonString[index] && !',]}'.includes(jsonString[index]!)) index++; - return tokens; - }, - unstrip = (tokens: Token[]): Token[] => { - let tail: string[] = []; + if (index == length && !(Allow.NUM & allow)) markPartialJSON('Unterminated number literal'); - tokens.map((token) => { - if (token.type === 'brace') { - if (token.value === '{') { - tail.push('}'); - } else { - tail.splice(tail.lastIndexOf('}'), 1); - } + try { + return JSON.parse(jsonString.substring(start, index)); + } catch (e) { + if (jsonString.substring(start, index) === '-' && Allow.NUM & allow) + markPartialJSON("Not sure what '-' is"); + try { + return JSON.parse(jsonString.substring(start, jsonString.lastIndexOf('e'))); + } catch (e) { + throwMalformedError(String(e)); } - if (token.type === 'paren') { - if (token.value === '[') { - tail.push(']'); - } else { - tail.splice(tail.lastIndexOf(']'), 1); - } - } - }); - - if (tail.length > 0) { - tail.reverse().map((item) => { - if (item === '}') { - tokens.push({ - type: 'brace', - value: '}', - }); - } else if (item === ']') { - tokens.push({ - type: 'paren', - value: ']', - }); - } - }); } + }; - return tokens; - }, - generate = (tokens: Token[]): string => { - let output = ''; + const skipBlank = () => { + while (index < length && ' \n\r\t'.includes(jsonString[index]!)) { + index++; + } + }; - 
tokens.map((token) => { - switch (token.type) { - case 'string': - output += '"' + token.value + '"'; - break; - default: - output += token.value; - break; - } - }); + return parseAny(); +}; - return output; - }, - partialParse = (input: string): unknown => JSON.parse(generate(unstrip(strip(tokenize(input))))); +// using this function with malformed JSON is undefined behavior +const partialParse = (input: string) => parseJSON(input, Allow.ALL ^ Allow.NUM); -export { partialParse }; +export { partialParse, PartialJSON, MalformedJSON }; diff --git a/tests/_vendor/partial-json-parser/partial-json-parsing.test.ts b/tests/_vendor/partial-json-parser/partial-json-parsing.test.ts new file mode 100644 index 000000000..6fad8f1a9 --- /dev/null +++ b/tests/_vendor/partial-json-parser/partial-json-parsing.test.ts @@ -0,0 +1,58 @@ +import fc from 'fast-check'; +import { MalformedJSON, partialParse } from 'openai/_vendor/partial-json-parser/parser'; + +describe('partial parsing', () => { + test('should parse complete json', () => { + expect(partialParse('{"__proto__": 0}')).toEqual(JSON.parse('{"__proto__": 0}')); + + fc.assert( + fc.property(fc.json({ depthSize: 'large', noUnicodeString: false }), (jsonString) => { + const parsedNormal = JSON.parse(jsonString); + const parsedPartial = partialParse(jsonString); + expect(parsedPartial).toEqual(parsedNormal); + }), + ); + }); + + test('should parse partial json', () => { + expect(partialParse('{"field')).toEqual({}); + expect(partialParse('"')).toEqual(''); + expect(partialParse('[2, 3, 4')).toEqual([2, 3]); + expect(partialParse('{"field": true, "field2')).toEqual({ field: true }); + expect(partialParse('{"field": true, "field2":')).toEqual({ field: true }); + expect(partialParse('{"field": true, "field2":{')).toEqual({ field: true, field2: {} }); + expect(partialParse('{"field": true, "field2": { "obj": "somestr')).toEqual({ + field: true, + field2: { obj: 'somestr' }, + }); + expect(partialParse('{"field": true, "field2": { 
"obj": "somestr",')).toEqual({ + field: true, + field2: { obj: 'somestr' }, + }); + expect(partialParse('{"field": "va')).toEqual({ field: 'va' }); + expect(partialParse('[ "v1", 2, "v2", 3')).toEqual(['v1', 2, 'v2']); + expect(partialParse('[ "v1", 2, "v2", -')).toEqual(['v1', 2, 'v2']); + expect(partialParse('[1, 2e')).toEqual([1]); + }); + + test('should only throw errors parsing numbers', () => + fc.assert( + fc.property(fc.json({ depthSize: 'large', noUnicodeString: false }), (jsonString) => { + for (let i = 1; i < jsonString.length; i++) { + // speedup + i += Math.floor(Math.random() * 3); + const substring = jsonString.substring(0, i); + + // since we don't allow partial parsing for numbers + if ( + typeof JSON.parse(jsonString) === 'number' && + 'e-+.'.includes(substring[substring.length - 1]!) + ) { + expect(() => partialParse(substring)).toThrow(MalformedJSON); + } else { + partialParse(substring); + } + } + }), + )); +}); diff --git a/yarn.lock b/yarn.lock index 3c7bdb93e..68486892b 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1725,6 +1725,13 @@ expect@^29.0.0, expect@^29.7.0: jest-message-util "^29.7.0" jest-util "^29.7.0" +fast-check@^3.22.0: + version "3.22.0" + resolved "https://registry.yarnpkg.com/fast-check/-/fast-check-3.22.0.tgz#1a8153e9d6fbdcc60b818f447cbb9cac1fdd8fb6" + integrity sha512-8HKz3qXqnHYp/VCNn2qfjHdAdcI8zcSqOyX64GOMukp7SL2bfzfeDKjSd+UyECtejccaZv3LcvZTm9YDD22iCQ== + dependencies: + pure-rand "^6.1.0" + fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: version "3.1.3" resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" @@ -3037,6 +3044,11 @@ pure-rand@^6.0.0: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.0.4.tgz#50b737f6a925468679bff00ad20eade53f37d5c7" integrity sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA== +pure-rand@^6.1.0: + version "6.1.0" + resolved 
"https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.1.0.tgz#d173cf23258231976ccbdb05247c9787957604f2" + integrity sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA== + qs@^6.10.3: version "6.13.0" resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" From 872f74c61b4aa2ea4072d345142d6e89be68f54b Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 13 Sep 2024 18:45:42 +0100 Subject: [PATCH 251/533] chore(examples): add a small delay to tool-calls example streaming --- examples/tool-calls-stream.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/examples/tool-calls-stream.ts b/examples/tool-calls-stream.ts index 687ea86fd..93f16a245 100755 --- a/examples/tool-calls-stream.ts +++ b/examples/tool-calls-stream.ts @@ -27,6 +27,9 @@ import { ChatCompletionMessageParam, } from 'openai/resources/chat'; +// Used so that the each chunk coming in is noticable +const CHUNK_DELAY_MS = 100; + // gets API Key from environment variable OPENAI_API_KEY const openai = new OpenAI(); @@ -126,6 +129,9 @@ async function main() { for await (const chunk of stream) { message = messageReducer(message, chunk); writeLine(message); + + // Add a small delay so that the chunks coming in are noticablej + await new Promise((resolve) => setTimeout(resolve, CHUNK_DELAY_MS)); } console.log(); messages.push(message); From 328f75caef55f2d7a829d3de520b741855e2e104 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Sep 2024 17:53:38 +0000 Subject: [PATCH 252/533] release: 4.61.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 24 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0a2d02022..e284d59ae 100644 --- 
a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.60.1" + ".": "4.61.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7bbcaaa2c..0829a7151 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.61.0 (2024-09-13) + +Full Changelog: [v4.60.1...v4.61.0](https://github.com/openai/openai-node/compare/v4.60.1...v4.61.0) + +### Bug Fixes + +* **client:** partial parsing update to handle strings ([46e8eb6](https://github.com/openai/openai-node/commit/46e8eb6a9a45b11f9e4c97474ed6c02b1faa43af)) +* **examples:** handle usage chunk in tool call streaming ([#1068](https://github.com/openai/openai-node/issues/1068)) ([e4188c4](https://github.com/openai/openai-node/commit/e4188c4ba443a21d1ef94658df5366f80f0e573b)) + + +### Chores + +* **examples:** add a small delay to tool-calls example streaming ([a3fc659](https://github.com/openai/openai-node/commit/a3fc65928af7085d1d8d785ad4765fedc3955641)) + + +### Documentation + +* update CONTRIBUTING.md ([#1071](https://github.com/openai/openai-node/issues/1071)) ([5de81c9](https://github.com/openai/openai-node/commit/5de81c95d7326602865e007715a76d5595824fd9)) + ## 4.60.1 (2024-09-13) Full Changelog: [v4.60.0...v4.60.1](https://github.com/openai/openai-node/compare/v4.60.0...v4.60.1) diff --git a/README.md b/README.md index 35a958759..8811d8ab4 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.60.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.61.0/mod.ts'; ``` diff --git a/package.json b/package.json index 934e1e722..42b1d795a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.60.1", + "version": "4.61.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 
9ad4399b7..a64186518 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.60.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.61.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 7070d859c..150565572 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.60.1'; // x-release-please-version +export const VERSION = '4.61.0'; // x-release-please-version From 5d4d74fd684699c5be420a6b441e7e46fdd43bed Mon Sep 17 00:00:00 2001 From: Matt Granmoe Date: Mon, 16 Sep 2024 03:21:14 -0500 Subject: [PATCH 253/533] fix(runTools): correct request options type (#1073) --- src/resources/beta/chat/completions.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index 113de4026..03ea0aab5 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -21,6 +21,7 @@ export { ParsingFunction, ParsingToolFunction, } from '../../../lib/RunnableFunction'; +import { RunnerOptions } from '../../../lib/AbstractChatCompletionRunner'; import { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; export { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; import { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; @@ -119,19 +120,19 @@ export class Completions extends APIResource { runTools< Params extends ChatCompletionToolRunnerParams, ParsedT = ExtractParsedContentFromParams, - >(body: Params, options?: Core.RequestOptions): ChatCompletionRunner; + >(body: Params, options?: RunnerOptions): ChatCompletionRunner; runTools< Params extends ChatCompletionStreamingToolRunnerParams, ParsedT = 
ExtractParsedContentFromParams, - >(body: Params, options?: Core.RequestOptions): ChatCompletionStreamingRunner; + >(body: Params, options?: RunnerOptions): ChatCompletionStreamingRunner; runTools< Params extends ChatCompletionToolRunnerParams | ChatCompletionStreamingToolRunnerParams, ParsedT = ExtractParsedContentFromParams, >( body: Params, - options?: Core.RequestOptions, + options?: RunnerOptions, ): ChatCompletionRunner | ChatCompletionStreamingRunner { if (body.stream) { return ChatCompletionStreamingRunner.runTools( From 0389da8d84c8c396b50f6efef72154102511d8c7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:23:48 +0000 Subject: [PATCH 254/533] chore(internal): update spec link (#1076) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index de3167f3a..2fc39385e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-501122aa32adaa2abb3d4487880ab9cdf2141addce2e6c3d1bd9bb6b44c318a8.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff407aa10917e62f2b0c12d1ad2c4f1258ed083bd45753c70eaaf5b1cf8356ae.yml From e26f31c4540ae0e61f649b400b120b95e12cf783 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:24:36 +0000 Subject: [PATCH 255/533] release: 4.61.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e284d59ae..8f32b4daf 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.61.0" + ".": "4.61.1" } diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 0829a7151..1fc81d615 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.61.1 (2024-09-16) + +Full Changelog: [v4.61.0...v4.61.1](https://github.com/openai/openai-node/compare/v4.61.0...v4.61.1) + +### Bug Fixes + +* **runTools:** correct request options type ([#1073](https://github.com/openai/openai-node/issues/1073)) ([399f971](https://github.com/openai/openai-node/commit/399f9710f9a1406fe2dd048a1d26418c0de7ff0c)) + + +### Chores + +* **internal:** update spec link ([#1076](https://github.com/openai/openai-node/issues/1076)) ([20f1bcc](https://github.com/openai/openai-node/commit/20f1bcce2b5d03c5185989212a5c5271a8d4209c)) + ## 4.61.0 (2024-09-13) Full Changelog: [v4.60.1...v4.61.0](https://github.com/openai/openai-node/compare/v4.60.1...v4.61.0) diff --git a/README.md b/README.md index 8811d8ab4..03ee259ee 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.61.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.61.1/mod.ts'; ``` diff --git a/package.json b/package.json index 42b1d795a..86e594c2c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.61.0", + "version": "4.61.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index a64186518..641b61c02 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.61.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.61.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 150565572..6c1e0cb8e 100644 --- a/src/version.ts +++ b/src/version.ts @@ 
-1 +1 @@ -export const VERSION = '4.61.0'; // x-release-please-version +export const VERSION = '4.61.1'; // x-release-please-version From d5c21314449091dd1c668c7358b25e041466f588 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 17 Sep 2024 17:20:58 +0100 Subject: [PATCH 256/533] feat(client): add ._request_id property to object responses (#1078) --- README.md | 11 +++++ src/core.ts | 58 +++++++++++++++------- tests/responses.test.ts | 104 +++++++++++++++++++++++++++++++++++++++- tests/utils/typing.ts | 9 ++++ 4 files changed, 165 insertions(+), 17 deletions(-) create mode 100644 tests/utils/typing.ts diff --git a/README.md b/README.md index 03ee259ee..b3de7fa55 100644 --- a/README.md +++ b/README.md @@ -361,6 +361,17 @@ Error codes are as followed: | >=500 | `InternalServerError` | | N/A | `APIConnectionError` | +## Request IDs + +> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests) + +All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. 
+ +```ts +const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4' }); +console.log(completion._request_id) // req_123 +``` + ## Microsoft Azure OpenAI To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` diff --git a/src/core.ts b/src/core.ts index a4bb87a32..90714d3ce 100644 --- a/src/core.ts +++ b/src/core.ts @@ -37,7 +37,7 @@ type APIResponseProps = { controller: AbortController; }; -async function defaultParseResponse(props: APIResponseProps): Promise { +async function defaultParseResponse(props: APIResponseProps): Promise> { const { response } = props; if (props.options.stream) { debug('response', response.status, response.url, response.headers, response.body); @@ -54,11 +54,11 @@ async function defaultParseResponse(props: APIResponseProps): Promise { // fetch refuses to read the body when the status code is 204. if (response.status === 204) { - return null as T; + return null as WithRequestID; } if (props.options.__binaryResponse) { - return response as unknown as T; + return response as unknown as WithRequestID; } const contentType = response.headers.get('content-type'); @@ -69,26 +69,44 @@ async function defaultParseResponse(props: APIResponseProps): Promise { debug('response', response.status, response.url, response.headers, json); - return json as T; + return _addRequestID(json, response); } const text = await response.text(); debug('response', response.status, response.url, response.headers, text); // TODO handle blob, arraybuffer, other content types, etc. - return text as unknown as T; + return text as unknown as WithRequestID; +} + +type WithRequestID = + T extends Array | Response | AbstractPage ? T + : T extends Record ? 
T & { _request_id?: string | null } + : T; + +function _addRequestID(value: T, response: Response): WithRequestID { + if (!value || typeof value !== 'object' || Array.isArray(value)) { + return value as WithRequestID; + } + + return Object.defineProperty(value, '_request_id', { + value: response.headers.get('x-request-id'), + enumerable: false, + }) as WithRequestID; } /** * A subclass of `Promise` providing additional helper methods * for interacting with the SDK. */ -export class APIPromise extends Promise { - private parsedPromise: Promise | undefined; +export class APIPromise extends Promise> { + private parsedPromise: Promise> | undefined; constructor( private responsePromise: Promise, - private parseResponse: (props: APIResponseProps) => PromiseOrValue = defaultParseResponse, + private parseResponse: ( + props: APIResponseProps, + ) => PromiseOrValue> = defaultParseResponse, ) { super((resolve) => { // this is maybe a bit weird but this has to be a no-op to not implicitly @@ -99,7 +117,9 @@ export class APIPromise extends Promise { } _thenUnwrap(transform: (data: T) => U): APIPromise { - return new APIPromise(this.responsePromise, async (props) => transform(await this.parseResponse(props))); + return new APIPromise(this.responsePromise, async (props) => + _addRequestID(transform(await this.parseResponse(props)), props.response), + ); } /** @@ -136,15 +156,15 @@ export class APIPromise extends Promise { return { data, response }; } - private parse(): Promise { + private parse(): Promise> { if (!this.parsedPromise) { - this.parsedPromise = this.responsePromise.then(this.parseResponse); + this.parsedPromise = this.responsePromise.then(this.parseResponse) as any as Promise>; } return this.parsedPromise; } - override then( - onfulfilled?: ((value: T) => TResult1 | PromiseLike) | undefined | null, + override then, TResult2 = never>( + onfulfilled?: ((value: WithRequestID) => TResult1 | PromiseLike) | undefined | null, onrejected?: ((reason: any) => TResult2 | 
PromiseLike) | undefined | null, ): Promise { return this.parse().then(onfulfilled, onrejected); @@ -152,11 +172,11 @@ export class APIPromise extends Promise { override catch( onrejected?: ((reason: any) => TResult | PromiseLike) | undefined | null, - ): Promise { + ): Promise | TResult> { return this.parse().catch(onrejected); } - override finally(onfinally?: (() => void) | undefined | null): Promise { + override finally(onfinally?: (() => void) | undefined | null): Promise> { return this.parse().finally(onfinally); } } @@ -706,7 +726,13 @@ export class PagePromise< ) { super( request, - async (props) => new Page(client, props.response, await defaultParseResponse(props), props.options), + async (props) => + new Page( + client, + props.response, + await defaultParseResponse(props), + props.options, + ) as WithRequestID, ); } diff --git a/tests/responses.test.ts b/tests/responses.test.ts index ef6ba27bf..fbd073a79 100644 --- a/tests/responses.test.ts +++ b/tests/responses.test.ts @@ -1,5 +1,8 @@ -import { createResponseHeaders } from 'openai/core'; +import { APIPromise, createResponseHeaders } from 'openai/core'; +import OpenAI from 'openai/index'; import { Headers } from 'openai/_shims/index'; +import { Response } from 'node-fetch'; +import { compareType } from './utils/typing'; describe('response parsing', () => { // TODO: test unicode characters @@ -23,3 +26,102 @@ describe('response parsing', () => { expect(headers['content-type']).toBe('text/xml, application/json'); }); }); + +describe('request id', () => { + test('types', () => { + compareType>, string>(true); + compareType>, number>(true); + compareType>, null>(true); + compareType>, void>(true); + compareType>, Response>(true); + compareType>, Response>(true); + compareType>, { foo: string } & { _request_id?: string | null }>( + true, + ); + compareType>>, Array<{ foo: string }>>(true); + }); + + test('object response', async () => { + const client = new OpenAI({ + apiKey: 'dummy', + fetch: async () => + 
new Response(JSON.stringify({ id: 'bar' }), { + headers: { 'x-request-id': 'req_id_xxx', 'content-type': 'application/json' }, + }), + }); + + const rsp = await client.chat.completions.create({ messages: [], model: 'gpt-4' }); + expect(rsp.id).toBe('bar'); + expect(rsp._request_id).toBe('req_id_xxx'); + expect(JSON.stringify(rsp)).toBe('{"id":"bar"}'); + }); + + test('envelope response', async () => { + const promise = new APIPromise<{ data: { foo: string } }>( + (async () => { + return { + response: new Response(JSON.stringify({ data: { foo: 'bar' } }), { + headers: { 'x-request-id': 'req_id_xxx', 'content-type': 'application/json' }, + }), + controller: {} as any, + options: {} as any, + }; + })(), + )._thenUnwrap((d) => d.data); + + const rsp = await promise; + expect(rsp.foo).toBe('bar'); + expect(rsp._request_id).toBe('req_id_xxx'); + }); + + test('page response', async () => { + const client = new OpenAI({ + apiKey: 'dummy', + fetch: async () => + new Response(JSON.stringify({ data: [{ foo: 'bar' }] }), { + headers: { 'x-request-id': 'req_id_xxx', 'content-type': 'application/json' }, + }), + }); + + const page = await client.fineTuning.jobs.list(); + expect(page.data).toMatchObject([{ foo: 'bar' }]); + expect((page as any)._request_id).toBeUndefined(); + }); + + test('array response', async () => { + const promise = new APIPromise>( + (async () => { + return { + response: new Response(JSON.stringify([{ foo: 'bar' }]), { + headers: { 'x-request-id': 'req_id_xxx', 'content-type': 'application/json' }, + }), + controller: {} as any, + options: {} as any, + }; + })(), + ); + + const rsp = await promise; + expect(rsp.length).toBe(1); + expect(rsp[0]).toMatchObject({ foo: 'bar' }); + expect((rsp as any)._request_id).toBeUndefined(); + }); + + test('string response', async () => { + const promise = new APIPromise( + (async () => { + return { + response: new Response('hello world', { + headers: { 'x-request-id': 'req_id_xxx', 'content-type': 'application/text' }, + 
}), + controller: {} as any, + options: {} as any, + }; + })(), + ); + + const result = await promise; + expect(result).toBe('hello world'); + expect((result as any)._request_id).toBeUndefined(); + }); +}); diff --git a/tests/utils/typing.ts b/tests/utils/typing.ts new file mode 100644 index 000000000..4a791d2a7 --- /dev/null +++ b/tests/utils/typing.ts @@ -0,0 +1,9 @@ +type Equal = (() => T extends X ? 1 : 2) extends () => T extends Y ? 1 : 2 ? true : false; + +export const expectType = (_expression: T): void => { + return; +}; + +export const compareType = (_expression: Equal): void => { + return; +}; From 1eb4e124ef635488de9cd3001cd94aad6c9a6f24 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 17 Sep 2024 12:20:35 +0000 Subject: [PATCH 257/533] chore(internal): add query string encoder (#1079) --- package.json | 5 +- scripts/utils/denoify.ts | 3 - src/index.ts | 4 +- src/internal/qs/LICENSE.md | 13 + src/internal/qs/README.md | 3 + src/internal/qs/formats.ts | 9 + src/internal/qs/index.ts | 13 + src/internal/qs/stringify.ts | 388 ++++++ src/internal/qs/types.ts | 71 ++ src/internal/qs/utils.ts | 265 ++++ tests/qs/empty-keys-cases.ts | 271 +++++ tests/qs/stringify.test.ts | 2232 ++++++++++++++++++++++++++++++++++ tests/qs/utils.test.ts | 169 +++ yarn.lock | 144 +-- 14 files changed, 3462 insertions(+), 128 deletions(-) create mode 100644 src/internal/qs/LICENSE.md create mode 100644 src/internal/qs/README.md create mode 100644 src/internal/qs/formats.ts create mode 100644 src/internal/qs/index.ts create mode 100644 src/internal/qs/stringify.ts create mode 100644 src/internal/qs/types.ts create mode 100644 src/internal/qs/utils.ts create mode 100644 tests/qs/empty-keys-cases.ts create mode 100644 tests/qs/stringify.test.ts create mode 100644 tests/qs/utils.test.ts diff --git a/package.json b/package.json index 86e594c2c..a65c9d6ba 100644 --- a/package.json +++ b/package.json @@ -26,13 +26,11 @@ "dependencies": { "@types/node": "^18.11.18", 
"@types/node-fetch": "^2.6.4", - "@types/qs": "^6.9.15", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7", - "qs": "^6.10.3" + "node-fetch": "^2.6.7" }, "devDependencies": { "@swc/core": "^1.3.102", @@ -43,6 +41,7 @@ "eslint": "^8.49.0", "eslint-plugin-prettier": "^5.0.1", "eslint-plugin-unused-imports": "^3.0.0", + "iconv-lite": "^0.6.3", "fast-check": "^3.22.0", "jest": "^29.4.0", "prettier": "^3.0.0", diff --git a/scripts/utils/denoify.ts b/scripts/utils/denoify.ts index 742bc069f..52705802a 100644 --- a/scripts/utils/denoify.ts +++ b/scripts/utils/denoify.ts @@ -102,9 +102,6 @@ async function denoify() { } else if (specifier.startsWith(pkgName + '/')) { // convert self-referencing module specifiers to relative paths specifier = file.getRelativePathAsModuleSpecifierTo(denoDir + specifier.substring(pkgName.length)); - } else if (specifier === 'qs') { - decl.replaceWithText(`import { qs } from "https://deno.land/x/deno_qs@0.0.1/mod.ts"`); - continue; } else if (!decl.isModuleSpecifierRelative()) { specifier = `npm:${specifier}`; } diff --git a/src/index.ts b/src/index.ts index 36064286d..b52406f6c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -2,10 +2,8 @@ import * as Errors from './error'; import * as Uploads from './uploads'; - import { type Agent, type RequestInit } from './_shims/index'; -import * as qs from 'qs'; - +import * as qs from './internal/qs'; import * as Core from './core'; import * as Pagination from './pagination'; import * as API from './resources/index'; diff --git a/src/internal/qs/LICENSE.md b/src/internal/qs/LICENSE.md new file mode 100644 index 000000000..3fda1573b --- /dev/null +++ b/src/internal/qs/LICENSE.md @@ -0,0 +1,13 @@ +BSD 3-Clause License + +Copyright (c) 2014, Nathan LaFreniere and other [contributors](https://github.com/puruvj/neoqs/graphs/contributors) All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/internal/qs/README.md b/src/internal/qs/README.md new file mode 100644 index 000000000..67ae04ecd --- /dev/null +++ b/src/internal/qs/README.md @@ -0,0 +1,3 @@ +# qs + +This is a vendored version of [neoqs](https://github.com/PuruVJ/neoqs) which is a TypeScript rewrite of [qs](https://github.com/ljharb/qs), a query string library. 
diff --git a/src/internal/qs/formats.ts b/src/internal/qs/formats.ts new file mode 100644 index 000000000..1cf9e2cde --- /dev/null +++ b/src/internal/qs/formats.ts @@ -0,0 +1,9 @@ +import type { Format } from './types'; + +export const default_format: Format = 'RFC3986'; +export const formatters: Record string> = { + RFC1738: (v: PropertyKey) => String(v).replace(/%20/g, '+'), + RFC3986: (v: PropertyKey) => String(v), +}; +export const RFC1738 = 'RFC1738'; +export const RFC3986 = 'RFC3986'; diff --git a/src/internal/qs/index.ts b/src/internal/qs/index.ts new file mode 100644 index 000000000..c3a3620d0 --- /dev/null +++ b/src/internal/qs/index.ts @@ -0,0 +1,13 @@ +import { default_format, formatters, RFC1738, RFC3986 } from './formats'; + +const formats = { + formatters, + RFC1738, + RFC3986, + default: default_format, +}; + +export { stringify } from './stringify'; +export { formats }; + +export type { DefaultDecoder, DefaultEncoder, Format, ParseOptions, StringifyOptions } from './types'; diff --git a/src/internal/qs/stringify.ts b/src/internal/qs/stringify.ts new file mode 100644 index 000000000..d0c450341 --- /dev/null +++ b/src/internal/qs/stringify.ts @@ -0,0 +1,388 @@ +import { encode, is_buffer, maybe_map } from './utils'; +import { default_format, formatters } from './formats'; +import type { NonNullableProperties, StringifyOptions } from './types'; + +const has = Object.prototype.hasOwnProperty; + +const array_prefix_generators = { + brackets(prefix: PropertyKey) { + return String(prefix) + '[]'; + }, + comma: 'comma', + indices(prefix: PropertyKey, key: string) { + return String(prefix) + '[' + key + ']'; + }, + repeat(prefix: PropertyKey) { + return String(prefix); + }, +}; + +const is_array = Array.isArray; +const push = Array.prototype.push; +const push_to_array = function (arr: any[], value_or_array: any) { + push.apply(arr, is_array(value_or_array) ? 
value_or_array : [value_or_array]); +}; + +const to_ISO = Date.prototype.toISOString; + +const defaults = { + addQueryPrefix: false, + allowDots: false, + allowEmptyArrays: false, + arrayFormat: 'indices', + charset: 'utf-8', + charsetSentinel: false, + delimiter: '&', + encode: true, + encodeDotInKeys: false, + encoder: encode, + encodeValuesOnly: false, + format: default_format, + formatter: formatters[default_format], + /** @deprecated */ + indices: false, + serializeDate(date) { + return to_ISO.call(date); + }, + skipNulls: false, + strictNullHandling: false, +} as NonNullableProperties; + +function is_non_nullish_primitive(v: unknown): v is string | number | boolean | symbol | bigint { + return ( + typeof v === 'string' || + typeof v === 'number' || + typeof v === 'boolean' || + typeof v === 'symbol' || + typeof v === 'bigint' + ); +} + +const sentinel = {}; + +function inner_stringify( + object: any, + prefix: PropertyKey, + generateArrayPrefix: StringifyOptions['arrayFormat'] | ((prefix: string, key: string) => string), + commaRoundTrip: boolean, + allowEmptyArrays: boolean, + strictNullHandling: boolean, + skipNulls: boolean, + encodeDotInKeys: boolean, + encoder: StringifyOptions['encoder'], + filter: StringifyOptions['filter'], + sort: StringifyOptions['sort'], + allowDots: StringifyOptions['allowDots'], + serializeDate: StringifyOptions['serializeDate'], + format: StringifyOptions['format'], + formatter: StringifyOptions['formatter'], + encodeValuesOnly: boolean, + charset: StringifyOptions['charset'], + sideChannel: WeakMap, +) { + let obj = object; + + let tmp_sc = sideChannel; + let step = 0; + let find_flag = false; + while ((tmp_sc = tmp_sc.get(sentinel)) !== void undefined && !find_flag) { + // Where object last appeared in the ref tree + const pos = tmp_sc.get(object); + step += 1; + if (typeof pos !== 'undefined') { + if (pos === step) { + throw new RangeError('Cyclic object value'); + } else { + find_flag = true; // Break while + } + } + if 
(typeof tmp_sc.get(sentinel) === 'undefined') { + step = 0; + } + } + + if (typeof filter === 'function') { + obj = filter(prefix, obj); + } else if (obj instanceof Date) { + obj = serializeDate?.(obj); + } else if (generateArrayPrefix === 'comma' && is_array(obj)) { + obj = maybe_map(obj, function (value) { + if (value instanceof Date) { + return serializeDate?.(value); + } + return value; + }); + } + + if (obj === null) { + if (strictNullHandling) { + return encoder && !encodeValuesOnly ? + // @ts-expect-error + encoder(prefix, defaults.encoder, charset, 'key', format) + : prefix; + } + + obj = ''; + } + + if (is_non_nullish_primitive(obj) || is_buffer(obj)) { + if (encoder) { + const key_value = + encodeValuesOnly ? prefix + // @ts-expect-error + : encoder(prefix, defaults.encoder, charset, 'key', format); + return [ + formatter?.(key_value) + + '=' + + // @ts-expect-error + formatter?.(encoder(obj, defaults.encoder, charset, 'value', format)), + ]; + } + return [formatter?.(prefix) + '=' + formatter?.(String(obj))]; + } + + const values: string[] = []; + + if (typeof obj === 'undefined') { + return values; + } + + let obj_keys; + if (generateArrayPrefix === 'comma' && is_array(obj)) { + // we need to join elements in + if (encodeValuesOnly && encoder) { + // @ts-expect-error values only + obj = maybe_map(obj, encoder); + } + obj_keys = [{ value: obj.length > 0 ? obj.join(',') || null : void undefined }]; + } else if (is_array(filter)) { + obj_keys = filter; + } else { + const keys = Object.keys(obj); + obj_keys = sort ? keys.sort(sort) : keys; + } + + const encoded_prefix = encodeDotInKeys ? String(prefix).replace(/\./g, '%2E') : String(prefix); + + const adjusted_prefix = + commaRoundTrip && is_array(obj) && obj.length === 1 ? 
encoded_prefix + '[]' : encoded_prefix; + + if (allowEmptyArrays && is_array(obj) && obj.length === 0) { + return adjusted_prefix + '[]'; + } + + for (let j = 0; j < obj_keys.length; ++j) { + const key = obj_keys[j]; + const value = + // @ts-ignore + typeof key === 'object' && typeof key.value !== 'undefined' ? key.value : obj[key as any]; + + if (skipNulls && value === null) { + continue; + } + + // @ts-ignore + const encoded_key = allowDots && encodeDotInKeys ? (key as any).replace(/\./g, '%2E') : key; + const key_prefix = + is_array(obj) ? + typeof generateArrayPrefix === 'function' ? + generateArrayPrefix(adjusted_prefix, encoded_key) + : adjusted_prefix + : adjusted_prefix + (allowDots ? '.' + encoded_key : '[' + encoded_key + ']'); + + sideChannel.set(object, step); + const valueSideChannel = new WeakMap(); + valueSideChannel.set(sentinel, sideChannel); + push_to_array( + values, + inner_stringify( + value, + key_prefix, + generateArrayPrefix, + commaRoundTrip, + allowEmptyArrays, + strictNullHandling, + skipNulls, + encodeDotInKeys, + // @ts-expect-error + generateArrayPrefix === 'comma' && encodeValuesOnly && is_array(obj) ? 
null : encoder, + filter, + sort, + allowDots, + serializeDate, + format, + formatter, + encodeValuesOnly, + charset, + valueSideChannel, + ), + ); + } + + return values; +} + +function normalize_stringify_options( + opts: StringifyOptions = defaults, +): NonNullableProperties { + if (typeof opts.allowEmptyArrays !== 'undefined' && typeof opts.allowEmptyArrays !== 'boolean') { + throw new TypeError('`allowEmptyArrays` option can only be `true` or `false`, when provided'); + } + + if (typeof opts.encodeDotInKeys !== 'undefined' && typeof opts.encodeDotInKeys !== 'boolean') { + throw new TypeError('`encodeDotInKeys` option can only be `true` or `false`, when provided'); + } + + if (opts.encoder !== null && typeof opts.encoder !== 'undefined' && typeof opts.encoder !== 'function') { + throw new TypeError('Encoder has to be a function.'); + } + + const charset = opts.charset || defaults.charset; + if (typeof opts.charset !== 'undefined' && opts.charset !== 'utf-8' && opts.charset !== 'iso-8859-1') { + throw new TypeError('The charset option must be either utf-8, iso-8859-1, or undefined'); + } + + let format = default_format; + if (typeof opts.format !== 'undefined') { + if (!has.call(formatters, opts.format)) { + throw new TypeError('Unknown format option provided.'); + } + format = opts.format; + } + const formatter = formatters[format]; + + let filter = defaults.filter; + if (typeof opts.filter === 'function' || is_array(opts.filter)) { + filter = opts.filter; + } + + let arrayFormat: StringifyOptions['arrayFormat']; + if (opts.arrayFormat && opts.arrayFormat in array_prefix_generators) { + arrayFormat = opts.arrayFormat; + } else if ('indices' in opts) { + arrayFormat = opts.indices ? 
'indices' : 'repeat'; + } else { + arrayFormat = defaults.arrayFormat; + } + + if ('commaRoundTrip' in opts && typeof opts.commaRoundTrip !== 'boolean') { + throw new TypeError('`commaRoundTrip` must be a boolean, or absent'); + } + + const allowDots = + typeof opts.allowDots === 'undefined' ? + !!opts.encodeDotInKeys === true ? + true + : defaults.allowDots + : !!opts.allowDots; + + return { + addQueryPrefix: typeof opts.addQueryPrefix === 'boolean' ? opts.addQueryPrefix : defaults.addQueryPrefix, + // @ts-ignore + allowDots: allowDots, + allowEmptyArrays: + typeof opts.allowEmptyArrays === 'boolean' ? !!opts.allowEmptyArrays : defaults.allowEmptyArrays, + arrayFormat: arrayFormat, + charset: charset, + charsetSentinel: + typeof opts.charsetSentinel === 'boolean' ? opts.charsetSentinel : defaults.charsetSentinel, + commaRoundTrip: !!opts.commaRoundTrip, + delimiter: typeof opts.delimiter === 'undefined' ? defaults.delimiter : opts.delimiter, + encode: typeof opts.encode === 'boolean' ? opts.encode : defaults.encode, + encodeDotInKeys: + typeof opts.encodeDotInKeys === 'boolean' ? opts.encodeDotInKeys : defaults.encodeDotInKeys, + encoder: typeof opts.encoder === 'function' ? opts.encoder : defaults.encoder, + encodeValuesOnly: + typeof opts.encodeValuesOnly === 'boolean' ? opts.encodeValuesOnly : defaults.encodeValuesOnly, + filter: filter, + format: format, + formatter: formatter, + serializeDate: typeof opts.serializeDate === 'function' ? opts.serializeDate : defaults.serializeDate, + skipNulls: typeof opts.skipNulls === 'boolean' ? opts.skipNulls : defaults.skipNulls, + // @ts-expect-error + sort: typeof opts.sort === 'function' ? opts.sort : null, + strictNullHandling: + typeof opts.strictNullHandling === 'boolean' ? 
opts.strictNullHandling : defaults.strictNullHandling, + }; +} + +export function stringify(object: any, opts: StringifyOptions = {}) { + let obj = object; + const options = normalize_stringify_options(opts); + + let obj_keys: PropertyKey[] | undefined; + let filter; + + if (typeof options.filter === 'function') { + filter = options.filter; + obj = filter('', obj); + } else if (is_array(options.filter)) { + filter = options.filter; + obj_keys = filter; + } + + const keys: string[] = []; + + if (typeof obj !== 'object' || obj === null) { + return ''; + } + + const generateArrayPrefix = array_prefix_generators[options.arrayFormat]; + const commaRoundTrip = generateArrayPrefix === 'comma' && options.commaRoundTrip; + + if (!obj_keys) { + obj_keys = Object.keys(obj); + } + + if (options.sort) { + obj_keys.sort(options.sort); + } + + const sideChannel = new WeakMap(); + for (let i = 0; i < obj_keys.length; ++i) { + const key = obj_keys[i]!; + + if (options.skipNulls && obj[key] === null) { + continue; + } + push_to_array( + keys, + inner_stringify( + obj[key], + key, + // @ts-expect-error + generateArrayPrefix, + commaRoundTrip, + options.allowEmptyArrays, + options.strictNullHandling, + options.skipNulls, + options.encodeDotInKeys, + options.encode ? options.encoder : null, + options.filter, + options.sort, + options.allowDots, + options.serializeDate, + options.format, + options.formatter, + options.encodeValuesOnly, + options.charset, + sideChannel, + ), + ); + } + + const joined = keys.join(options.delimiter); + let prefix = options.addQueryPrefix === true ? '?' : ''; + + if (options.charsetSentinel) { + if (options.charset === 'iso-8859-1') { + // encodeURIComponent('✓'), the "numeric entity" representation of a checkmark + prefix += 'utf8=%26%2310003%3B&'; + } else { + // encodeURIComponent('✓') + prefix += 'utf8=%E2%9C%93&'; + } + } + + return joined.length > 0 ? 
prefix + joined : ''; +} diff --git a/src/internal/qs/types.ts b/src/internal/qs/types.ts new file mode 100644 index 000000000..7c28dbb46 --- /dev/null +++ b/src/internal/qs/types.ts @@ -0,0 +1,71 @@ +export type Format = 'RFC1738' | 'RFC3986'; + +export type DefaultEncoder = (str: any, defaultEncoder?: any, charset?: string) => string; +export type DefaultDecoder = (str: string, decoder?: any, charset?: string) => string; + +export type BooleanOptional = boolean | undefined; + +export type StringifyBaseOptions = { + delimiter?: string; + allowDots?: boolean; + encodeDotInKeys?: boolean; + strictNullHandling?: boolean; + skipNulls?: boolean; + encode?: boolean; + encoder?: ( + str: any, + defaultEncoder: DefaultEncoder, + charset: string, + type: 'key' | 'value', + format?: Format, + ) => string; + filter?: Array | ((prefix: PropertyKey, value: any) => any); + arrayFormat?: 'indices' | 'brackets' | 'repeat' | 'comma'; + indices?: boolean; + sort?: ((a: PropertyKey, b: PropertyKey) => number) | null; + serializeDate?: (d: Date) => string; + format?: 'RFC1738' | 'RFC3986'; + formatter?: (str: PropertyKey) => string; + encodeValuesOnly?: boolean; + addQueryPrefix?: boolean; + charset?: 'utf-8' | 'iso-8859-1'; + charsetSentinel?: boolean; + allowEmptyArrays?: boolean; + commaRoundTrip?: boolean; +}; + +export type StringifyOptions = StringifyBaseOptions; + +export type ParseBaseOptions = { + comma?: boolean; + delimiter?: string | RegExp; + depth?: number | false; + decoder?: (str: string, defaultDecoder: DefaultDecoder, charset: string, type: 'key' | 'value') => any; + arrayLimit?: number; + parseArrays?: boolean; + plainObjects?: boolean; + allowPrototypes?: boolean; + allowSparse?: boolean; + parameterLimit?: number; + strictDepth?: boolean; + strictNullHandling?: boolean; + ignoreQueryPrefix?: boolean; + charset?: 'utf-8' | 'iso-8859-1'; + charsetSentinel?: boolean; + interpretNumericEntities?: boolean; + allowEmptyArrays?: boolean; + duplicates?: 'combine' | 
'first' | 'last'; + allowDots?: boolean; + decodeDotInKeys?: boolean; +}; + +export type ParseOptions = ParseBaseOptions; + +export type ParsedQs = { + [key: string]: undefined | string | string[] | ParsedQs | ParsedQs[]; +}; + +// Type to remove null or undefined union from each property +export type NonNullableProperties = { + [K in keyof T]-?: Exclude; +}; diff --git a/src/internal/qs/utils.ts b/src/internal/qs/utils.ts new file mode 100644 index 000000000..113b18fb9 --- /dev/null +++ b/src/internal/qs/utils.ts @@ -0,0 +1,265 @@ +import { RFC1738 } from './formats'; +import type { DefaultEncoder, Format } from './types'; + +const has = Object.prototype.hasOwnProperty; +const is_array = Array.isArray; + +const hex_table = (() => { + const array = []; + for (let i = 0; i < 256; ++i) { + array.push('%' + ((i < 16 ? '0' : '') + i.toString(16)).toUpperCase()); + } + + return array; +})(); + +function compact_queue>(queue: Array<{ obj: T; prop: string }>) { + while (queue.length > 1) { + const item = queue.pop(); + if (!item) continue; + + const obj = item.obj[item.prop]; + + if (is_array(obj)) { + const compacted: unknown[] = []; + + for (let j = 0; j < obj.length; ++j) { + if (typeof obj[j] !== 'undefined') { + compacted.push(obj[j]); + } + } + + // @ts-ignore + item.obj[item.prop] = compacted; + } + } +} + +function array_to_object(source: any[], options: { plainObjects: boolean }) { + const obj = options && options.plainObjects ? 
Object.create(null) : {}; + for (let i = 0; i < source.length; ++i) { + if (typeof source[i] !== 'undefined') { + obj[i] = source[i]; + } + } + + return obj; +} + +export function merge( + target: any, + source: any, + options: { plainObjects?: boolean; allowPrototypes?: boolean } = {}, +) { + if (!source) { + return target; + } + + if (typeof source !== 'object') { + if (is_array(target)) { + target.push(source); + } else if (target && typeof target === 'object') { + if ( + (options && (options.plainObjects || options.allowPrototypes)) || + !has.call(Object.prototype, source) + ) { + target[source] = true; + } + } else { + return [target, source]; + } + + return target; + } + + if (!target || typeof target !== 'object') { + return [target].concat(source); + } + + let mergeTarget = target; + if (is_array(target) && !is_array(source)) { + // @ts-ignore + mergeTarget = array_to_object(target, options); + } + + if (is_array(target) && is_array(source)) { + source.forEach(function (item, i) { + if (has.call(target, i)) { + const targetItem = target[i]; + if (targetItem && typeof targetItem === 'object' && item && typeof item === 'object') { + target[i] = merge(targetItem, item, options); + } else { + target.push(item); + } + } else { + target[i] = item; + } + }); + return target; + } + + return Object.keys(source).reduce(function (acc, key) { + const value = source[key]; + + if (has.call(acc, key)) { + acc[key] = merge(acc[key], value, options); + } else { + acc[key] = value; + } + return acc; + }, mergeTarget); +} + +export function assign_single_source(target: any, source: any) { + return Object.keys(source).reduce(function (acc, key) { + acc[key] = source[key]; + return acc; + }, target); +} + +export function decode(str: string, _: any, charset: string) { + const strWithoutPlus = str.replace(/\+/g, ' '); + if (charset === 'iso-8859-1') { + // unescape never throws, no try...catch needed: + return strWithoutPlus.replace(/%[0-9a-f]{2}/gi, unescape); + } + // utf-8 + 
try { + return decodeURIComponent(strWithoutPlus); + } catch (e) { + return strWithoutPlus; + } +} + +const limit = 1024; + +export const encode: ( + str: any, + defaultEncoder: DefaultEncoder, + charset: string, + type: 'key' | 'value', + format: Format, +) => string = (str, _defaultEncoder, charset, _kind, format: Format) => { + // This code was originally written by Brian White for the io.js core querystring library. + // It has been adapted here for stricter adherence to RFC 3986 + if (str.length === 0) { + return str; + } + + let string = str; + if (typeof str === 'symbol') { + string = Symbol.prototype.toString.call(str); + } else if (typeof str !== 'string') { + string = String(str); + } + + if (charset === 'iso-8859-1') { + return escape(string).replace(/%u[0-9a-f]{4}/gi, function ($0) { + return '%26%23' + parseInt($0.slice(2), 16) + '%3B'; + }); + } + + let out = ''; + for (let j = 0; j < string.length; j += limit) { + const segment = string.length >= limit ? string.slice(j, j + limit) : string; + const arr = []; + + for (let i = 0; i < segment.length; ++i) { + let c = segment.charCodeAt(i); + if ( + c === 0x2d || // - + c === 0x2e || // . + c === 0x5f || // _ + c === 0x7e || // ~ + (c >= 0x30 && c <= 0x39) || // 0-9 + (c >= 0x41 && c <= 0x5a) || // a-z + (c >= 0x61 && c <= 0x7a) || // A-Z + (format === RFC1738 && (c === 0x28 || c === 0x29)) // ( ) + ) { + arr[arr.length] = segment.charAt(i); + continue; + } + + if (c < 0x80) { + arr[arr.length] = hex_table[c]; + continue; + } + + if (c < 0x800) { + arr[arr.length] = hex_table[0xc0 | (c >> 6)]! + hex_table[0x80 | (c & 0x3f)]; + continue; + } + + if (c < 0xd800 || c >= 0xe000) { + arr[arr.length] = + hex_table[0xe0 | (c >> 12)]! + hex_table[0x80 | ((c >> 6) & 0x3f)] + hex_table[0x80 | (c & 0x3f)]; + continue; + } + + i += 1; + c = 0x10000 + (((c & 0x3ff) << 10) | (segment.charCodeAt(i) & 0x3ff)); + + arr[arr.length] = + hex_table[0xf0 | (c >> 18)]! 
+ + hex_table[0x80 | ((c >> 12) & 0x3f)] + + hex_table[0x80 | ((c >> 6) & 0x3f)] + + hex_table[0x80 | (c & 0x3f)]; + } + + out += arr.join(''); + } + + return out; +}; + +export function compact(value: any) { + const queue = [{ obj: { o: value }, prop: 'o' }]; + const refs = []; + + for (let i = 0; i < queue.length; ++i) { + const item = queue[i]; + // @ts-ignore + const obj = item.obj[item.prop]; + + const keys = Object.keys(obj); + for (let j = 0; j < keys.length; ++j) { + const key = keys[j]!; + const val = obj[key]; + if (typeof val === 'object' && val !== null && refs.indexOf(val) === -1) { + queue.push({ obj: obj, prop: key }); + refs.push(val); + } + } + } + + compact_queue(queue); + + return value; +} + +export function is_regexp(obj: any) { + return Object.prototype.toString.call(obj) === '[object RegExp]'; +} + +export function is_buffer(obj: any) { + if (!obj || typeof obj !== 'object') { + return false; + } + + return !!(obj.constructor && obj.constructor.isBuffer && obj.constructor.isBuffer(obj)); +} + +export function combine(a: any, b: any) { + return [].concat(a, b); +} + +export function maybe_map(val: T[], fn: (v: T) => T) { + if (is_array(val)) { + const mapped = []; + for (let i = 0; i < val.length; i += 1) { + mapped.push(fn(val[i]!)); + } + return mapped; + } + return fn(val); +} diff --git a/tests/qs/empty-keys-cases.ts b/tests/qs/empty-keys-cases.ts new file mode 100644 index 000000000..ea2c1b0a2 --- /dev/null +++ b/tests/qs/empty-keys-cases.ts @@ -0,0 +1,271 @@ +export const empty_test_cases = [ + { + input: '&', + with_empty_keys: {}, + stringify_output: { + brackets: '', + indices: '', + repeat: '', + }, + no_empty_keys: {}, + }, + { + input: '&&', + with_empty_keys: {}, + stringify_output: { + brackets: '', + indices: '', + repeat: '', + }, + no_empty_keys: {}, + }, + { + input: '&=', + with_empty_keys: { '': '' }, + stringify_output: { + brackets: '=', + indices: '=', + repeat: '=', + }, + no_empty_keys: {}, + }, + { + input: '&=&', + 
with_empty_keys: { '': '' }, + stringify_output: { + brackets: '=', + indices: '=', + repeat: '=', + }, + no_empty_keys: {}, + }, + { + input: '&=&=', + with_empty_keys: { '': ['', ''] }, + stringify_output: { + brackets: '[]=&[]=', + indices: '[0]=&[1]=', + repeat: '=&=', + }, + no_empty_keys: {}, + }, + { + input: '&=&=&', + with_empty_keys: { '': ['', ''] }, + stringify_output: { + brackets: '[]=&[]=', + indices: '[0]=&[1]=', + repeat: '=&=', + }, + no_empty_keys: {}, + }, + { + input: '=', + with_empty_keys: { '': '' }, + no_empty_keys: {}, + stringify_output: { + brackets: '=', + indices: '=', + repeat: '=', + }, + }, + { + input: '=&', + with_empty_keys: { '': '' }, + stringify_output: { + brackets: '=', + indices: '=', + repeat: '=', + }, + no_empty_keys: {}, + }, + { + input: '=&&&', + with_empty_keys: { '': '' }, + stringify_output: { + brackets: '=', + indices: '=', + repeat: '=', + }, + no_empty_keys: {}, + }, + { + input: '=&=&=&', + with_empty_keys: { '': ['', '', ''] }, + stringify_output: { + brackets: '[]=&[]=&[]=', + indices: '[0]=&[1]=&[2]=', + repeat: '=&=&=', + }, + no_empty_keys: {}, + }, + { + input: '=&a[]=b&a[1]=c', + with_empty_keys: { '': '', a: ['b', 'c'] }, + stringify_output: { + brackets: '=&a[]=b&a[]=c', + indices: '=&a[0]=b&a[1]=c', + repeat: '=&a=b&a=c', + }, + no_empty_keys: { a: ['b', 'c'] }, + }, + { + input: '=a', + with_empty_keys: { '': 'a' }, + no_empty_keys: {}, + stringify_output: { + brackets: '=a', + indices: '=a', + repeat: '=a', + }, + }, + { + input: 'a==a', + with_empty_keys: { a: '=a' }, + no_empty_keys: { a: '=a' }, + stringify_output: { + brackets: 'a==a', + indices: 'a==a', + repeat: 'a==a', + }, + }, + { + input: '=&a[]=b', + with_empty_keys: { '': '', a: ['b'] }, + stringify_output: { + brackets: '=&a[]=b', + indices: '=&a[0]=b', + repeat: '=&a=b', + }, + no_empty_keys: { a: ['b'] }, + }, + { + input: '=&a[]=b&a[]=c&a[2]=d', + with_empty_keys: { '': '', a: ['b', 'c', 'd'] }, + stringify_output: { + brackets: 
'=&a[]=b&a[]=c&a[]=d', + indices: '=&a[0]=b&a[1]=c&a[2]=d', + repeat: '=&a=b&a=c&a=d', + }, + no_empty_keys: { a: ['b', 'c', 'd'] }, + }, + { + input: '=a&=b', + with_empty_keys: { '': ['a', 'b'] }, + stringify_output: { + brackets: '[]=a&[]=b', + indices: '[0]=a&[1]=b', + repeat: '=a&=b', + }, + no_empty_keys: {}, + }, + { + input: '=a&foo=b', + with_empty_keys: { '': 'a', foo: 'b' }, + no_empty_keys: { foo: 'b' }, + stringify_output: { + brackets: '=a&foo=b', + indices: '=a&foo=b', + repeat: '=a&foo=b', + }, + }, + { + input: 'a[]=b&a=c&=', + with_empty_keys: { '': '', a: ['b', 'c'] }, + stringify_output: { + brackets: '=&a[]=b&a[]=c', + indices: '=&a[0]=b&a[1]=c', + repeat: '=&a=b&a=c', + }, + no_empty_keys: { a: ['b', 'c'] }, + }, + { + input: 'a[]=b&a=c&=', + with_empty_keys: { '': '', a: ['b', 'c'] }, + stringify_output: { + brackets: '=&a[]=b&a[]=c', + indices: '=&a[0]=b&a[1]=c', + repeat: '=&a=b&a=c', + }, + no_empty_keys: { a: ['b', 'c'] }, + }, + { + input: 'a[0]=b&a=c&=', + with_empty_keys: { '': '', a: ['b', 'c'] }, + stringify_output: { + brackets: '=&a[]=b&a[]=c', + indices: '=&a[0]=b&a[1]=c', + repeat: '=&a=b&a=c', + }, + no_empty_keys: { a: ['b', 'c'] }, + }, + { + input: 'a=b&a[]=c&=', + with_empty_keys: { '': '', a: ['b', 'c'] }, + stringify_output: { + brackets: '=&a[]=b&a[]=c', + indices: '=&a[0]=b&a[1]=c', + repeat: '=&a=b&a=c', + }, + no_empty_keys: { a: ['b', 'c'] }, + }, + { + input: 'a=b&a[0]=c&=', + with_empty_keys: { '': '', a: ['b', 'c'] }, + stringify_output: { + brackets: '=&a[]=b&a[]=c', + indices: '=&a[0]=b&a[1]=c', + repeat: '=&a=b&a=c', + }, + no_empty_keys: { a: ['b', 'c'] }, + }, + { + input: '[]=a&[]=b& []=1', + with_empty_keys: { '': ['a', 'b'], ' ': ['1'] }, + stringify_output: { + brackets: '[]=a&[]=b& []=1', + indices: '[0]=a&[1]=b& [0]=1', + repeat: '=a&=b& =1', + }, + no_empty_keys: { 0: 'a', 1: 'b', ' ': ['1'] }, + }, + { + input: '[0]=a&[1]=b&a[0]=1&a[1]=2', + with_empty_keys: { '': ['a', 'b'], a: ['1', '2'] }, + 
no_empty_keys: { 0: 'a', 1: 'b', a: ['1', '2'] }, + stringify_output: { + brackets: '[]=a&[]=b&a[]=1&a[]=2', + indices: '[0]=a&[1]=b&a[0]=1&a[1]=2', + repeat: '=a&=b&a=1&a=2', + }, + }, + { + input: '[deep]=a&[deep]=2', + with_empty_keys: { '': { deep: ['a', '2'] } }, + stringify_output: { + brackets: '[deep][]=a&[deep][]=2', + indices: '[deep][0]=a&[deep][1]=2', + repeat: '[deep]=a&[deep]=2', + }, + no_empty_keys: { deep: ['a', '2'] }, + }, + { + input: '%5B0%5D=a&%5B1%5D=b', + with_empty_keys: { '': ['a', 'b'] }, + stringify_output: { + brackets: '[]=a&[]=b', + indices: '[0]=a&[1]=b', + repeat: '=a&=b', + }, + no_empty_keys: { 0: 'a', 1: 'b' }, + }, +] satisfies { + input: string; + with_empty_keys: Record; + stringify_output: { + brackets: string; + indices: string; + repeat: string; + }; + no_empty_keys: Record; +}[]; diff --git a/tests/qs/stringify.test.ts b/tests/qs/stringify.test.ts new file mode 100644 index 000000000..ab3456824 --- /dev/null +++ b/tests/qs/stringify.test.ts @@ -0,0 +1,2232 @@ +import iconv from 'iconv-lite'; +import { stringify } from 'openai/internal/qs'; +import { encode } from 'openai/internal/qs/utils'; +import { StringifyOptions } from 'openai/internal/qs/types'; +import { empty_test_cases } from './empty-keys-cases'; +import assert from 'assert'; + +describe('stringify()', function () { + test('stringifies a querystring object', function () { + expect(stringify({ a: 'b' })).toBe('a=b'); + expect(stringify({ a: 1 })).toBe('a=1'); + expect(stringify({ a: 1, b: 2 })).toBe('a=1&b=2'); + expect(stringify({ a: 'A_Z' })).toBe('a=A_Z'); + expect(stringify({ a: '€' })).toBe('a=%E2%82%AC'); + expect(stringify({ a: '' })).toBe('a=%EE%80%80'); + expect(stringify({ a: 'א' })).toBe('a=%D7%90'); + expect(stringify({ a: '𐐷' })).toBe('a=%F0%90%90%B7'); + }); + + test('stringifies falsy values', function () { + expect(stringify(undefined)).toBe(''); + expect(stringify(null)).toBe(''); + expect(stringify(null, { strictNullHandling: true })).toBe(''); 
+ expect(stringify(false)).toBe(''); + expect(stringify(0)).toBe(''); + }); + + test('stringifies symbols', function () { + expect(stringify(Symbol.iterator)).toBe(''); + expect(stringify([Symbol.iterator])).toBe('0=Symbol%28Symbol.iterator%29'); + expect(stringify({ a: Symbol.iterator })).toBe('a=Symbol%28Symbol.iterator%29'); + expect(stringify({ a: [Symbol.iterator] }, { encodeValuesOnly: true, arrayFormat: 'brackets' })).toBe( + 'a[]=Symbol%28Symbol.iterator%29', + ); + }); + + test('stringifies bigints', function () { + var three = BigInt(3); + // @ts-expect-error + var encodeWithN = function (value, defaultEncoder, charset) { + var result = defaultEncoder(value, defaultEncoder, charset); + return typeof value === 'bigint' ? result + 'n' : result; + }; + + expect(stringify(three)).toBe(''); + expect(stringify([three])).toBe('0=3'); + expect(stringify([three], { encoder: encodeWithN })).toBe('0=3n'); + expect(stringify({ a: three })).toBe('a=3'); + expect(stringify({ a: three }, { encoder: encodeWithN })).toBe('a=3n'); + expect(stringify({ a: [three] }, { encodeValuesOnly: true, arrayFormat: 'brackets' })).toBe('a[]=3'); + expect( + stringify({ a: [three] }, { encodeValuesOnly: true, encoder: encodeWithN, arrayFormat: 'brackets' }), + ).toBe('a[]=3n'); + }); + + test('encodes dot in key of object when encodeDotInKeys and allowDots is provided', function () { + expect( + stringify({ 'name.obj': { first: 'John', last: 'Doe' } }, { allowDots: false, encodeDotInKeys: false }), + ).toBe('name.obj%5Bfirst%5D=John&name.obj%5Blast%5D=Doe'); + expect( + stringify({ 'name.obj': { first: 'John', last: 'Doe' } }, { allowDots: true, encodeDotInKeys: false }), + ).toBe('name.obj.first=John&name.obj.last=Doe'); + expect( + stringify({ 'name.obj': { first: 'John', last: 'Doe' } }, { allowDots: false, encodeDotInKeys: true }), + ).toBe('name%252Eobj%5Bfirst%5D=John&name%252Eobj%5Blast%5D=Doe'); + expect( + stringify({ 'name.obj': { first: 'John', last: 'Doe' } }, { allowDots: 
true, encodeDotInKeys: true }), + ).toBe('name%252Eobj.first=John&name%252Eobj.last=Doe'); + + // st.equal( + // stringify( + // { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + // { allowDots: false, encodeDotInKeys: false }, + // ), + // 'name.obj.subobject%5Bfirst.godly.name%5D=John&name.obj.subobject%5Blast%5D=Doe', + // 'with allowDots false and encodeDotInKeys false', + // ); + // st.equal( + // stringify( + // { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + // { allowDots: true, encodeDotInKeys: false }, + // ), + // 'name.obj.subobject.first.godly.name=John&name.obj.subobject.last=Doe', + // 'with allowDots false and encodeDotInKeys false', + // ); + // st.equal( + // stringify( + // { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + // { allowDots: false, encodeDotInKeys: true }, + // ), + // 'name%252Eobj%252Esubobject%5Bfirst.godly.name%5D=John&name%252Eobj%252Esubobject%5Blast%5D=Doe', + // 'with allowDots false and encodeDotInKeys true', + // ); + // st.equal( + // stringify( + // { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + // { allowDots: true, encodeDotInKeys: true }, + // ), + // 'name%252Eobj%252Esubobject.first%252Egodly%252Ename=John&name%252Eobj%252Esubobject.last=Doe', + // 'with allowDots true and encodeDotInKeys true', + // ); + expect( + stringify( + { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + { allowDots: false, encodeDotInKeys: false }, + ), + ).toBe('name.obj.subobject%5Bfirst.godly.name%5D=John&name.obj.subobject%5Blast%5D=Doe'); + expect( + stringify( + { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + { allowDots: true, encodeDotInKeys: false }, + ), + ).toBe('name.obj.subobject.first.godly.name=John&name.obj.subobject.last=Doe'); + expect( + stringify( + { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + { allowDots: false, encodeDotInKeys: true }, + ), + 
).toBe('name%252Eobj%252Esubobject%5Bfirst.godly.name%5D=John&name%252Eobj%252Esubobject%5Blast%5D=Doe'); + expect( + stringify( + { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + { allowDots: true, encodeDotInKeys: true }, + ), + ).toBe('name%252Eobj%252Esubobject.first%252Egodly%252Ename=John&name%252Eobj%252Esubobject.last=Doe'); + }); + + test('should encode dot in key of object, and automatically set allowDots to `true` when encodeDotInKeys is true and allowDots in undefined', function () { + // st.equal( + // stringify( + // { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + // { encodeDotInKeys: true }, + // ), + // 'name%252Eobj%252Esubobject.first%252Egodly%252Ename=John&name%252Eobj%252Esubobject.last=Doe', + // 'with allowDots undefined and encodeDotInKeys true', + // ); + expect( + stringify( + { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + { encodeDotInKeys: true }, + ), + ).toBe('name%252Eobj%252Esubobject.first%252Egodly%252Ename=John&name%252Eobj%252Esubobject.last=Doe'); + }); + + test('should encode dot in key of object when encodeDotInKeys and allowDots is provided, and nothing else when encodeValuesOnly is provided', function () { + // st.equal( + // stringify( + // { 'name.obj': { first: 'John', last: 'Doe' } }, + // { + // encodeDotInKeys: true, + // allowDots: true, + // encodeValuesOnly: true, + // }, + // ), + // 'name%2Eobj.first=John&name%2Eobj.last=Doe', + // ); + expect( + stringify( + { 'name.obj': { first: 'John', last: 'Doe' } }, + { + encodeDotInKeys: true, + allowDots: true, + encodeValuesOnly: true, + }, + ), + ).toBe('name%2Eobj.first=John&name%2Eobj.last=Doe'); + + // st.equal( + // stringify( + // { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + // { allowDots: true, encodeDotInKeys: true, encodeValuesOnly: true }, + // ), + // 'name%2Eobj%2Esubobject.first%2Egodly%2Ename=John&name%2Eobj%2Esubobject.last=Doe', + // ); + expect( + 
stringify( + { 'name.obj.subobject': { 'first.godly.name': 'John', last: 'Doe' } }, + { allowDots: true, encodeDotInKeys: true, encodeValuesOnly: true }, + ), + ).toBe('name%2Eobj%2Esubobject.first%2Egodly%2Ename=John&name%2Eobj%2Esubobject.last=Doe'); + }); + + test('throws when `commaRoundTrip` is not a boolean', function () { + // st['throws']( + // function () { + // stringify({}, { commaRoundTrip: 'not a boolean' }); + // }, + // TypeError, + // 'throws when `commaRoundTrip` is not a boolean', + // ); + expect(() => { + // @ts-expect-error + stringify({}, { commaRoundTrip: 'not a boolean' }); + }).toThrow(TypeError); + }); + + test('throws when `encodeDotInKeys` is not a boolean', function () { + // st['throws'](function () { + // stringify({ a: [], b: 'zz' }, { encodeDotInKeys: 'foobar' }); + // }, TypeError); + expect(() => { + // @ts-expect-error + stringify({ a: [], b: 'zz' }, { encodeDotInKeys: 'foobar' }); + }).toThrow(TypeError); + + // st['throws'](function () { + // stringify({ a: [], b: 'zz' }, { encodeDotInKeys: 0 }); + // }, TypeError); + expect(() => { + // @ts-expect-error + stringify({ a: [], b: 'zz' }, { encodeDotInKeys: 0 }); + }).toThrow(TypeError); + + // st['throws'](function () { + // stringify({ a: [], b: 'zz' }, { encodeDotInKeys: NaN }); + // }, TypeError); + expect(() => { + // @ts-expect-error + stringify({ a: [], b: 'zz' }, { encodeDotInKeys: NaN }); + }).toThrow(TypeError); + + // st['throws'](function () { + // stringify({ a: [], b: 'zz' }, { encodeDotInKeys: null }); + // }, TypeError); + expect(() => { + // @ts-expect-error + stringify({ a: [], b: 'zz' }, { encodeDotInKeys: null }); + }).toThrow(TypeError); + }); + + test('adds query prefix', function () { + // st.equal(stringify({ a: 'b' }, { addQueryPrefix: true }), '?a=b'); + expect(stringify({ a: 'b' }, { addQueryPrefix: true })).toBe('?a=b'); + }); + + test('with query prefix, outputs blank string given an empty object', function () { + // st.equal(stringify({}, { 
addQueryPrefix: true }), ''); + expect(stringify({}, { addQueryPrefix: true })).toBe(''); + }); + + test('stringifies nested falsy values', function () { + // st.equal(stringify({ a: { b: { c: null } } }), 'a%5Bb%5D%5Bc%5D='); + // st.equal( + // stringify({ a: { b: { c: null } } }, { strictNullHandling: true }), + // 'a%5Bb%5D%5Bc%5D', + // ); + // st.equal(stringify({ a: { b: { c: false } } }), 'a%5Bb%5D%5Bc%5D=false'); + expect(stringify({ a: { b: { c: null } } })).toBe('a%5Bb%5D%5Bc%5D='); + expect(stringify({ a: { b: { c: null } } }, { strictNullHandling: true })).toBe('a%5Bb%5D%5Bc%5D'); + expect(stringify({ a: { b: { c: false } } })).toBe('a%5Bb%5D%5Bc%5D=false'); + }); + + test('stringifies a nested object', function () { + // st.equal(stringify({ a: { b: 'c' } }), 'a%5Bb%5D=c'); + // st.equal(stringify({ a: { b: { c: { d: 'e' } } } }), 'a%5Bb%5D%5Bc%5D%5Bd%5D=e'); + expect(stringify({ a: { b: 'c' } })).toBe('a%5Bb%5D=c'); + expect(stringify({ a: { b: { c: { d: 'e' } } } })).toBe('a%5Bb%5D%5Bc%5D%5Bd%5D=e'); + }); + + test('`allowDots` option: stringifies a nested object with dots notation', function () { + // st.equal(stringify({ a: { b: 'c' } }, { allowDots: true }), 'a.b=c'); + // st.equal(stringify({ a: { b: { c: { d: 'e' } } } }, { allowDots: true }), 'a.b.c.d=e'); + expect(stringify({ a: { b: 'c' } }, { allowDots: true })).toBe('a.b=c'); + expect(stringify({ a: { b: { c: { d: 'e' } } } }, { allowDots: true })).toBe('a.b.c.d=e'); + }); + + test('stringifies an array value', function () { + // st.equal( + // stringify({ a: ['b', 'c', 'd'] }, { arrayFormat: 'indices' }), + // 'a%5B0%5D=b&a%5B1%5D=c&a%5B2%5D=d', + // 'indices => indices', + // ); + // st.equal( + // stringify({ a: ['b', 'c', 'd'] }, { arrayFormat: 'brackets' }), + // 'a%5B%5D=b&a%5B%5D=c&a%5B%5D=d', + // 'brackets => brackets', + // ); + // st.equal( + // stringify({ a: ['b', 'c', 'd'] }, { arrayFormat: 'comma' }), + // 'a=b%2Cc%2Cd', + // 'comma => comma', + // ); + // st.equal( + // 
stringify({ a: ['b', 'c', 'd'] }, { arrayFormat: 'comma', commaRoundTrip: true }), + // 'a=b%2Cc%2Cd', + // 'comma round trip => comma', + // ); + // st.equal( + // stringify({ a: ['b', 'c', 'd'] }), + // 'a%5B0%5D=b&a%5B1%5D=c&a%5B2%5D=d', + // 'default => indices', + // ); + expect(stringify({ a: ['b', 'c', 'd'] }, { arrayFormat: 'indices' })).toBe( + 'a%5B0%5D=b&a%5B1%5D=c&a%5B2%5D=d', + ); + expect(stringify({ a: ['b', 'c', 'd'] }, { arrayFormat: 'brackets' })).toBe( + 'a%5B%5D=b&a%5B%5D=c&a%5B%5D=d', + ); + expect(stringify({ a: ['b', 'c', 'd'] }, { arrayFormat: 'comma' })).toBe('a=b%2Cc%2Cd'); + expect(stringify({ a: ['b', 'c', 'd'] }, { arrayFormat: 'comma', commaRoundTrip: true })).toBe( + 'a=b%2Cc%2Cd', + ); + expect(stringify({ a: ['b', 'c', 'd'] })).toBe('a%5B0%5D=b&a%5B1%5D=c&a%5B2%5D=d'); + }); + + test('`skipNulls` option', function () { + // st.equal( + // stringify({ a: 'b', c: null }, { skipNulls: true }), + // 'a=b', + // 'omits nulls when asked', + // ); + expect(stringify({ a: 'b', c: null }, { skipNulls: true })).toBe('a=b'); + + // st.equal( + // stringify({ a: { b: 'c', d: null } }, { skipNulls: true }), + // 'a%5Bb%5D=c', + // 'omits nested nulls when asked', + // ); + expect(stringify({ a: { b: 'c', d: null } }, { skipNulls: true })).toBe('a%5Bb%5D=c'); + }); + + test('omits array indices when asked', function () { + // st.equal(stringify({ a: ['b', 'c', 'd'] }, { indices: false }), 'a=b&a=c&a=d'); + expect(stringify({ a: ['b', 'c', 'd'] }, { indices: false })).toBe('a=b&a=c&a=d'); + }); + + test('omits object key/value pair when value is empty array', function () { + // st.equal(stringify({ a: [], b: 'zz' }), 'b=zz'); + expect(stringify({ a: [], b: 'zz' })).toBe('b=zz'); + }); + + test('should not omit object key/value pair when value is empty array and when asked', function () { + // st.equal(stringify({ a: [], b: 'zz' }), 'b=zz'); + // st.equal(stringify({ a: [], b: 'zz' }, { allowEmptyArrays: false }), 'b=zz'); + // st.equal(stringify({ 
a: [], b: 'zz' }, { allowEmptyArrays: true }), 'a[]&b=zz'); + expect(stringify({ a: [], b: 'zz' })).toBe('b=zz'); + expect(stringify({ a: [], b: 'zz' }, { allowEmptyArrays: false })).toBe('b=zz'); + expect(stringify({ a: [], b: 'zz' }, { allowEmptyArrays: true })).toBe('a[]&b=zz'); + }); + + test('should throw when allowEmptyArrays is not of type boolean', function () { + // st['throws'](function () { + // stringify({ a: [], b: 'zz' }, { allowEmptyArrays: 'foobar' }); + // }, TypeError); + expect(() => { + // @ts-expect-error + stringify({ a: [], b: 'zz' }, { allowEmptyArrays: 'foobar' }); + }).toThrow(TypeError); + + // st['throws'](function () { + // stringify({ a: [], b: 'zz' }, { allowEmptyArrays: 0 }); + // }, TypeError); + expect(() => { + // @ts-expect-error + stringify({ a: [], b: 'zz' }, { allowEmptyArrays: 0 }); + }).toThrow(TypeError); + + // st['throws'](function () { + // stringify({ a: [], b: 'zz' }, { allowEmptyArrays: NaN }); + // }, TypeError); + expect(() => { + // @ts-expect-error + stringify({ a: [], b: 'zz' }, { allowEmptyArrays: NaN }); + }).toThrow(TypeError); + + // st['throws'](function () { + // stringify({ a: [], b: 'zz' }, { allowEmptyArrays: null }); + // }, TypeError); + expect(() => { + // @ts-expect-error + stringify({ a: [], b: 'zz' }, { allowEmptyArrays: null }); + }).toThrow(TypeError); + }); + + test('allowEmptyArrays + strictNullHandling', function () { + // st.equal( + // stringify({ testEmptyArray: [] }, { strictNullHandling: true, allowEmptyArrays: true }), + // 'testEmptyArray[]', + // ); + expect(stringify({ testEmptyArray: [] }, { strictNullHandling: true, allowEmptyArrays: true })).toBe( + 'testEmptyArray[]', + ); + }); + + describe('stringifies an array value with one item vs multiple items', function () { + test('non-array item', function () { + // s2t.equal( + // stringify({ a: 'c' }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + // 'a=c', + // ); + // s2t.equal( + // stringify({ a: 'c' }, { encodeValuesOnly: 
true, arrayFormat: 'brackets' }), + // 'a=c', + // ); + // s2t.equal(stringify({ a: 'c' }, { encodeValuesOnly: true, arrayFormat: 'comma' }), 'a=c'); + // s2t.equal(stringify({ a: 'c' }, { encodeValuesOnly: true }), 'a=c'); + expect(stringify({ a: 'c' }, { encodeValuesOnly: true, arrayFormat: 'indices' })).toBe('a=c'); + expect(stringify({ a: 'c' }, { encodeValuesOnly: true, arrayFormat: 'brackets' })).toBe('a=c'); + expect(stringify({ a: 'c' }, { encodeValuesOnly: true, arrayFormat: 'comma' })).toBe('a=c'); + expect(stringify({ a: 'c' }, { encodeValuesOnly: true })).toBe('a=c'); + }); + + test('array with a single item', function () { + // s2t.equal( + // stringify({ a: ['c'] }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + // 'a[0]=c', + // ); + // s2t.equal( + // stringify({ a: ['c'] }, { encodeValuesOnly: true, arrayFormat: 'brackets' }), + // 'a[]=c', + // ); + // s2t.equal( + // stringify({ a: ['c'] }, { encodeValuesOnly: true, arrayFormat: 'comma' }), + // 'a=c', + // ); + // s2t.equal( + // stringify( + // { a: ['c'] }, + // { encodeValuesOnly: true, arrayFormat: 'comma', commaRoundTrip: true }, + // ), + // 'a[]=c', + // ); // so it parses back as an array + // s2t.equal(stringify({ a: ['c'] }, { encodeValuesOnly: true }), 'a[0]=c'); + expect(stringify({ a: ['c'] }, { encodeValuesOnly: true, arrayFormat: 'indices' })).toBe('a[0]=c'); + expect(stringify({ a: ['c'] }, { encodeValuesOnly: true, arrayFormat: 'brackets' })).toBe('a[]=c'); + expect(stringify({ a: ['c'] }, { encodeValuesOnly: true, arrayFormat: 'comma' })).toBe('a=c'); + expect( + stringify({ a: ['c'] }, { encodeValuesOnly: true, arrayFormat: 'comma', commaRoundTrip: true }), + ).toBe('a[]=c'); + expect(stringify({ a: ['c'] }, { encodeValuesOnly: true })).toBe('a[0]=c'); + }); + + test('array with multiple items', function () { + // s2t.equal( + // stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + // 'a[0]=c&a[1]=d', + // ); + // s2t.equal( + // 
stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true, arrayFormat: 'brackets' }), + // 'a[]=c&a[]=d', + // ); + // s2t.equal( + // stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true, arrayFormat: 'comma' }), + // 'a=c,d', + // ); + // s2t.equal( + // stringify( + // { a: ['c', 'd'] }, + // { encodeValuesOnly: true, arrayFormat: 'comma', commaRoundTrip: true }, + // ), + // 'a=c,d', + // ); + // s2t.equal(stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true }), 'a[0]=c&a[1]=d'); + expect(stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true, arrayFormat: 'indices' })).toBe( + 'a[0]=c&a[1]=d', + ); + expect(stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true, arrayFormat: 'brackets' })).toBe( + 'a[]=c&a[]=d', + ); + expect(stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true, arrayFormat: 'comma' })).toBe('a=c,d'); + expect( + stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true, arrayFormat: 'comma', commaRoundTrip: true }), + ).toBe('a=c,d'); + expect(stringify({ a: ['c', 'd'] }, { encodeValuesOnly: true })).toBe('a[0]=c&a[1]=d'); + }); + + test('array with multiple items with a comma inside', function () { + // s2t.equal( + // stringify({ a: ['c,d', 'e'] }, { encodeValuesOnly: true, arrayFormat: 'comma' }), + // 'a=c%2Cd,e', + // ); + // s2t.equal(stringify({ a: ['c,d', 'e'] }, { arrayFormat: 'comma' }), 'a=c%2Cd%2Ce'); + expect(stringify({ a: ['c,d', 'e'] }, { encodeValuesOnly: true, arrayFormat: 'comma' })).toBe( + 'a=c%2Cd,e', + ); + expect(stringify({ a: ['c,d', 'e'] }, { arrayFormat: 'comma' })).toBe('a=c%2Cd%2Ce'); + + // s2t.equal( + // stringify( + // { a: ['c,d', 'e'] }, + // { encodeValuesOnly: true, arrayFormat: 'comma', commaRoundTrip: true }, + // ), + // 'a=c%2Cd,e', + // ); + // s2t.equal( + // stringify({ a: ['c,d', 'e'] }, { arrayFormat: 'comma', commaRoundTrip: true }), + // 'a=c%2Cd%2Ce', + // ); + expect( + stringify( + { a: ['c,d', 'e'] }, + { encodeValuesOnly: true, arrayFormat: 'comma', commaRoundTrip: true }, + ), + 
).toBe('a=c%2Cd,e'); + expect(stringify({ a: ['c,d', 'e'] }, { arrayFormat: 'comma', commaRoundTrip: true })).toBe( + 'a=c%2Cd%2Ce', + ); + }); + }); + + test('stringifies a nested array value', function () { + expect(stringify({ a: { b: ['c', 'd'] } }, { encodeValuesOnly: true, arrayFormat: 'indices' })).toBe( + 'a[b][0]=c&a[b][1]=d', + ); + expect(stringify({ a: { b: ['c', 'd'] } }, { encodeValuesOnly: true, arrayFormat: 'brackets' })).toBe( + 'a[b][]=c&a[b][]=d', + ); + expect(stringify({ a: { b: ['c', 'd'] } }, { encodeValuesOnly: true, arrayFormat: 'comma' })).toBe( + 'a[b]=c,d', + ); + expect(stringify({ a: { b: ['c', 'd'] } }, { encodeValuesOnly: true })).toBe('a[b][0]=c&a[b][1]=d'); + }); + + test('stringifies comma and empty array values', function () { + // st.equal( + // stringify({ a: [',', '', 'c,d%'] }, { encode: false, arrayFormat: 'indices' }), + // 'a[0]=,&a[1]=&a[2]=c,d%', + // ); + // st.equal( + // stringify({ a: [',', '', 'c,d%'] }, { encode: false, arrayFormat: 'brackets' }), + // 'a[]=,&a[]=&a[]=c,d%', + // ); + // st.equal( + // stringify({ a: [',', '', 'c,d%'] }, { encode: false, arrayFormat: 'comma' }), + // 'a=,,,c,d%', + // ); + // st.equal( + // stringify({ a: [',', '', 'c,d%'] }, { encode: false, arrayFormat: 'repeat' }), + // 'a=,&a=&a=c,d%', + // ); + expect(stringify({ a: [',', '', 'c,d%'] }, { encode: false, arrayFormat: 'indices' })).toBe( + 'a[0]=,&a[1]=&a[2]=c,d%', + ); + expect(stringify({ a: [',', '', 'c,d%'] }, { encode: false, arrayFormat: 'brackets' })).toBe( + 'a[]=,&a[]=&a[]=c,d%', + ); + expect(stringify({ a: [',', '', 'c,d%'] }, { encode: false, arrayFormat: 'comma' })).toBe('a=,,,c,d%'); + expect(stringify({ a: [',', '', 'c,d%'] }, { encode: false, arrayFormat: 'repeat' })).toBe( + 'a=,&a=&a=c,d%', + ); + + // st.equal( + // stringify( + // { a: [',', '', 'c,d%'] }, + // { encode: true, encodeValuesOnly: true, arrayFormat: 'indices' }, + // ), + // 'a[0]=%2C&a[1]=&a[2]=c%2Cd%25', + // ); + // st.equal( + // stringify( 
+ // { a: [',', '', 'c,d%'] }, + // { encode: true, encodeValuesOnly: true, arrayFormat: 'brackets' }, + // ), + // 'a[]=%2C&a[]=&a[]=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: [',', '', 'c,d%'] }, + // { encode: true, encodeValuesOnly: true, arrayFormat: 'comma' }, + // ), + // 'a=%2C,,c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: [',', '', 'c,d%'] }, + // { encode: true, encodeValuesOnly: true, arrayFormat: 'repeat' }, + // ), + // 'a=%2C&a=&a=c%2Cd%25', + // ); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: false, arrayFormat: 'indices' }), + ).toBe('a%5B0%5D=%2C&a%5B1%5D=&a%5B2%5D=c%2Cd%25'); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: true, arrayFormat: 'brackets' }), + ).toBe('a[]=%2C&a[]=&a[]=c%2Cd%25'); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: false, arrayFormat: 'comma' }), + ).toBe('a=%2C%2C%2Cc%2Cd%25'); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: false, arrayFormat: 'repeat' }), + ).toBe('a=%2C&a=&a=c%2Cd%25'); + + // st.equal( + // stringify( + // { a: [',', '', 'c,d%'] }, + // { encode: true, encodeValuesOnly: false, arrayFormat: 'indices' }, + // ), + // 'a%5B0%5D=%2C&a%5B1%5D=&a%5B2%5D=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: [',', '', 'c,d%'] }, + // { encode: true, encodeValuesOnly: false, arrayFormat: 'brackets' }, + // ), + // 'a%5B%5D=%2C&a%5B%5D=&a%5B%5D=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: [',', '', 'c,d%'] }, + // { encode: true, encodeValuesOnly: false, arrayFormat: 'comma' }, + // ), + // 'a=%2C%2C%2Cc%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: [',', '', 'c,d%'] }, + // { encode: true, encodeValuesOnly: false, arrayFormat: 'repeat' }, + // ), + // 'a=%2C&a=&a=c%2Cd%25', + // ); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: false, arrayFormat: 'repeat' }), + 
).toBe('a=%2C&a=&a=c%2Cd%25'); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: false, arrayFormat: 'indices' }), + ).toBe('a%5B0%5D=%2C&a%5B1%5D=&a%5B2%5D=c%2Cd%25'); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: true, arrayFormat: 'brackets' }), + ).toBe('a[]=%2C&a[]=&a[]=c%2Cd%25'); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: false, arrayFormat: 'comma' }), + ).toBe('a=%2C%2C%2Cc%2Cd%25'); + expect( + stringify({ a: [',', '', 'c,d%'] }, { encode: true, encodeValuesOnly: false, arrayFormat: 'repeat' }), + ).toBe('a=%2C&a=&a=c%2Cd%25'); + }); + + test('stringifies comma and empty non-array values', function () { + // st.equal( + // stringify({ a: ',', b: '', c: 'c,d%' }, { encode: false, arrayFormat: 'indices' }), + // 'a=,&b=&c=c,d%', + // ); + // st.equal( + // stringify({ a: ',', b: '', c: 'c,d%' }, { encode: false, arrayFormat: 'brackets' }), + // 'a=,&b=&c=c,d%', + // ); + // st.equal( + // stringify({ a: ',', b: '', c: 'c,d%' }, { encode: false, arrayFormat: 'comma' }), + // 'a=,&b=&c=c,d%', + // ); + // st.equal( + // stringify({ a: ',', b: '', c: 'c,d%' }, { encode: false, arrayFormat: 'repeat' }), + // 'a=,&b=&c=c,d%', + // ); + expect(stringify({ a: ',', b: '', c: 'c,d%' }, { encode: false, arrayFormat: 'indices' })).toBe( + 'a=,&b=&c=c,d%', + ); + expect(stringify({ a: ',', b: '', c: 'c,d%' }, { encode: false, arrayFormat: 'brackets' })).toBe( + 'a=,&b=&c=c,d%', + ); + + // st.equal( + // stringify( + // { a: ',', b: '', c: 'c,d%' }, + // { encode: true, encodeValuesOnly: true, arrayFormat: 'indices' }, + // ), + // 'a=%2C&b=&c=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: ',', b: '', c: 'c,d%' }, + // { encode: true, encodeValuesOnly: true, arrayFormat: 'brackets' }, + // ), + // 'a=%2C&b=&c=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: ',', b: '', c: 'c,d%' }, + // { encode: true, encodeValuesOnly: true, arrayFormat: 
'comma' }, + // ), + // 'a=%2C&b=&c=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: ',', b: '', c: 'c,d%' }, + // { encode: true, encodeValuesOnly: true, arrayFormat: 'repeat' }, + // ), + // 'a=%2C&b=&c=c%2Cd%25', + // ); + expect( + stringify( + { a: ',', b: '', c: 'c,d%' }, + { encode: true, encodeValuesOnly: true, arrayFormat: 'indices' }, + ), + ).toBe('a=%2C&b=&c=c%2Cd%25'); + expect( + stringify( + { a: ',', b: '', c: 'c,d%' }, + { encode: true, encodeValuesOnly: true, arrayFormat: 'brackets' }, + ), + ).toBe('a=%2C&b=&c=c%2Cd%25'); + expect( + stringify({ a: ',', b: '', c: 'c,d%' }, { encode: true, encodeValuesOnly: true, arrayFormat: 'comma' }), + ).toBe('a=%2C&b=&c=c%2Cd%25'); + expect( + stringify( + { a: ',', b: '', c: 'c,d%' }, + { encode: true, encodeValuesOnly: true, arrayFormat: 'repeat' }, + ), + ).toBe('a=%2C&b=&c=c%2Cd%25'); + + // st.equal( + // stringify( + // { a: ',', b: '', c: 'c,d%' }, + // { encode: true, encodeValuesOnly: false, arrayFormat: 'indices' }, + // ), + // 'a=%2C&b=&c=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: ',', b: '', c: 'c,d%' }, + // { encode: true, encodeValuesOnly: false, arrayFormat: 'brackets' }, + // ), + // 'a=%2C&b=&c=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: ',', b: '', c: 'c,d%' }, + // { encode: true, encodeValuesOnly: false, arrayFormat: 'comma' }, + // ), + // 'a=%2C&b=&c=c%2Cd%25', + // ); + // st.equal( + // stringify( + // { a: ',', b: '', c: 'c,d%' }, + // { encode: true, encodeValuesOnly: false, arrayFormat: 'repeat' }, + // ), + // 'a=%2C&b=&c=c%2Cd%25', + // ); + expect( + stringify( + { a: ',', b: '', c: 'c,d%' }, + { encode: true, encodeValuesOnly: false, arrayFormat: 'indices' }, + ), + ).toBe('a=%2C&b=&c=c%2Cd%25'); + expect( + stringify( + { a: ',', b: '', c: 'c,d%' }, + { encode: true, encodeValuesOnly: false, arrayFormat: 'brackets' }, + ), + ).toBe('a=%2C&b=&c=c%2Cd%25'); + expect( + stringify( + { a: ',', b: '', c: 'c,d%' }, + { encode: true, 
encodeValuesOnly: false, arrayFormat: 'comma' }, + ), + ).toBe('a=%2C&b=&c=c%2Cd%25'); + expect( + stringify( + { a: ',', b: '', c: 'c,d%' }, + { encode: true, encodeValuesOnly: false, arrayFormat: 'repeat' }, + ), + ).toBe('a=%2C&b=&c=c%2Cd%25'); + }); + + test('stringifies a nested array value with dots notation', function () { + // st.equal( + // stringify( + // { a: { b: ['c', 'd'] } }, + // { allowDots: true, encodeValuesOnly: true, arrayFormat: 'indices' }, + // ), + // 'a.b[0]=c&a.b[1]=d', + // 'indices: stringifies with dots + indices', + // ); + // st.equal( + // stringify( + // { a: { b: ['c', 'd'] } }, + // { allowDots: true, encodeValuesOnly: true, arrayFormat: 'brackets' }, + // ), + // 'a.b[]=c&a.b[]=d', + // 'brackets: stringifies with dots + brackets', + // ); + // st.equal( + // stringify( + // { a: { b: ['c', 'd'] } }, + // { allowDots: true, encodeValuesOnly: true, arrayFormat: 'comma' }, + // ), + // 'a.b=c,d', + // 'comma: stringifies with dots + comma', + // ); + // st.equal( + // stringify({ a: { b: ['c', 'd'] } }, { allowDots: true, encodeValuesOnly: true }), + // 'a.b[0]=c&a.b[1]=d', + // 'default: stringifies with dots + indices', + // ); + expect( + stringify( + { a: { b: ['c', 'd'] } }, + { allowDots: true, encodeValuesOnly: true, arrayFormat: 'indices' }, + ), + ).toBe('a.b[0]=c&a.b[1]=d'); + expect( + stringify( + { a: { b: ['c', 'd'] } }, + { allowDots: true, encodeValuesOnly: true, arrayFormat: 'brackets' }, + ), + ).toBe('a.b[]=c&a.b[]=d'); + expect( + stringify({ a: { b: ['c', 'd'] } }, { allowDots: true, encodeValuesOnly: true, arrayFormat: 'comma' }), + ).toBe('a.b=c,d'); + expect(stringify({ a: { b: ['c', 'd'] } }, { allowDots: true, encodeValuesOnly: true })).toBe( + 'a.b[0]=c&a.b[1]=d', + ); + }); + + test('stringifies an object inside an array', function () { + // st.equal( + // stringify({ a: [{ b: 'c' }] }, { arrayFormat: 'indices', encodeValuesOnly: true }), + // 'a[0][b]=c', + // 'indices => indices', + // ); + // 
st.equal( + // stringify({ a: [{ b: 'c' }] }, { arrayFormat: 'repeat', encodeValuesOnly: true }), + // 'a[b]=c', + // 'repeat => repeat', + // ); + // st.equal( + // stringify({ a: [{ b: 'c' }] }, { arrayFormat: 'brackets', encodeValuesOnly: true }), + // 'a[][b]=c', + // 'brackets => brackets', + // ); + // st.equal( + // stringify({ a: [{ b: 'c' }] }, { encodeValuesOnly: true }), + // 'a[0][b]=c', + // 'default => indices', + // ); + expect(stringify({ a: [{ b: 'c' }] }, { arrayFormat: 'indices', encodeValuesOnly: true })).toBe( + 'a[0][b]=c', + ); + expect(stringify({ a: [{ b: 'c' }] }, { arrayFormat: 'repeat', encodeValuesOnly: true })).toBe('a[b]=c'); + expect(stringify({ a: [{ b: 'c' }] }, { arrayFormat: 'brackets', encodeValuesOnly: true })).toBe( + 'a[][b]=c', + ); + expect(stringify({ a: [{ b: 'c' }] }, { encodeValuesOnly: true })).toBe('a[0][b]=c'); + + // st.equal( + // stringify({ a: [{ b: { c: [1] } }] }, { arrayFormat: 'indices', encodeValuesOnly: true }), + // 'a[0][b][c][0]=1', + // 'indices => indices', + // ); + // st.equal( + // stringify({ a: [{ b: { c: [1] } }] }, { arrayFormat: 'repeat', encodeValuesOnly: true }), + // 'a[b][c]=1', + // 'repeat => repeat', + // ); + // st.equal( + // stringify({ a: [{ b: { c: [1] } }] }, { arrayFormat: 'brackets', encodeValuesOnly: true }), + // 'a[][b][c][]=1', + // 'brackets => brackets', + // ); + // st.equal( + // stringify({ a: [{ b: { c: [1] } }] }, { encodeValuesOnly: true }), + // 'a[0][b][c][0]=1', + // 'default => indices', + // ); + expect(stringify({ a: [{ b: { c: [1] } }] }, { arrayFormat: 'indices', encodeValuesOnly: true })).toBe( + 'a[0][b][c][0]=1', + ); + expect(stringify({ a: [{ b: { c: [1] } }] }, { arrayFormat: 'repeat', encodeValuesOnly: true })).toBe( + 'a[b][c]=1', + ); + expect(stringify({ a: [{ b: { c: [1] } }] }, { arrayFormat: 'brackets', encodeValuesOnly: true })).toBe( + 'a[][b][c][]=1', + ); + expect(stringify({ a: [{ b: { c: [1] } }] }, { encodeValuesOnly: true 
})).toBe('a[0][b][c][0]=1'); + }); + + test('stringifies an array with mixed objects and primitives', function () { + // st.equal( + // stringify({ a: [{ b: 1 }, 2, 3] }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + // 'a[0][b]=1&a[1]=2&a[2]=3', + // 'indices => indices', + // ); + // st.equal( + // stringify({ a: [{ b: 1 }, 2, 3] }, { encodeValuesOnly: true, arrayFormat: 'brackets' }), + // 'a[][b]=1&a[]=2&a[]=3', + // 'brackets => brackets', + // ); + // st.equal( + // stringify({ a: [{ b: 1 }, 2, 3] }, { encodeValuesOnly: true, arrayFormat: 'comma' }), + // '???', + // 'brackets => brackets', + // { skip: 'TODO: figure out what this should do' }, + // ); + // st.equal( + // stringify({ a: [{ b: 1 }, 2, 3] }, { encodeValuesOnly: true }), + // 'a[0][b]=1&a[1]=2&a[2]=3', + // 'default => indices', + // ); + expect(stringify({ a: [{ b: 1 }, 2, 3] }, { encodeValuesOnly: true, arrayFormat: 'indices' })).toBe( + 'a[0][b]=1&a[1]=2&a[2]=3', + ); + expect(stringify({ a: [{ b: 1 }, 2, 3] }, { encodeValuesOnly: true, arrayFormat: 'brackets' })).toBe( + 'a[][b]=1&a[]=2&a[]=3', + ); + // !Skipped: Figure out what this should do + // expect( + // stringify({ a: [{ b: 1 }, 2, 3] }, { encodeValuesOnly: true, arrayFormat: 'comma' }), + // ).toBe('???'); + expect(stringify({ a: [{ b: 1 }, 2, 3] }, { encodeValuesOnly: true })).toBe('a[0][b]=1&a[1]=2&a[2]=3'); + }); + + test('stringifies an object inside an array with dots notation', function () { + // st.equal( + // stringify({ a: [{ b: 'c' }] }, { allowDots: true, encode: false, arrayFormat: 'indices' }), + // 'a[0].b=c', + // 'indices => indices', + // ); + // st.equal( + // stringify( + // { a: [{ b: 'c' }] }, + // { allowDots: true, encode: false, arrayFormat: 'brackets' }, + // ), + // 'a[].b=c', + // 'brackets => brackets', + // ); + // st.equal( + // stringify({ a: [{ b: 'c' }] }, { allowDots: true, encode: false }), + // 'a[0].b=c', + // 'default => indices', + // ); + expect(stringify({ a: [{ b: 'c' }] }, { 
allowDots: true, encode: false, arrayFormat: 'indices' })).toBe( + 'a[0].b=c', + ); + expect(stringify({ a: [{ b: 'c' }] }, { allowDots: true, encode: false, arrayFormat: 'brackets' })).toBe( + 'a[].b=c', + ); + expect(stringify({ a: [{ b: 'c' }] }, { allowDots: true, encode: false })).toBe('a[0].b=c'); + + // st.equal( + // stringify( + // { a: [{ b: { c: [1] } }] }, + // { allowDots: true, encode: false, arrayFormat: 'indices' }, + // ), + // 'a[0].b.c[0]=1', + // 'indices => indices', + // ); + // st.equal( + // stringify( + // { a: [{ b: { c: [1] } }] }, + // { allowDots: true, encode: false, arrayFormat: 'brackets' }, + // ), + // 'a[].b.c[]=1', + // 'brackets => brackets', + // ); + // st.equal( + // stringify({ a: [{ b: { c: [1] } }] }, { allowDots: true, encode: false }), + // 'a[0].b.c[0]=1', + // 'default => indices', + // ); + expect( + stringify({ a: [{ b: { c: [1] } }] }, { allowDots: true, encode: false, arrayFormat: 'indices' }), + ).toBe('a[0].b.c[0]=1'); + expect( + stringify({ a: [{ b: { c: [1] } }] }, { allowDots: true, encode: false, arrayFormat: 'brackets' }), + ).toBe('a[].b.c[]=1'); + expect(stringify({ a: [{ b: { c: [1] } }] }, { allowDots: true, encode: false })).toBe('a[0].b.c[0]=1'); + }); + + test('does not omit object keys when indices = false', function () { + // st.equal(stringify({ a: [{ b: 'c' }] }, { indices: false }), 'a%5Bb%5D=c'); + expect(stringify({ a: [{ b: 'c' }] }, { indices: false })).toBe('a%5Bb%5D=c'); + }); + + test('uses indices notation for arrays when indices=true', function () { + // st.equal(stringify({ a: ['b', 'c'] }, { indices: true }), 'a%5B0%5D=b&a%5B1%5D=c'); + expect(stringify({ a: ['b', 'c'] }, { indices: true })).toBe('a%5B0%5D=b&a%5B1%5D=c'); + }); + + test('uses indices notation for arrays when no arrayFormat is specified', function () { + // st.equal(stringify({ a: ['b', 'c'] }), 'a%5B0%5D=b&a%5B1%5D=c'); + expect(stringify({ a: ['b', 'c'] })).toBe('a%5B0%5D=b&a%5B1%5D=c'); + }); + + test('uses indices 
notation for arrays when arrayFormat=indices', function () { + // st.equal(stringify({ a: ['b', 'c'] }, { arrayFormat: 'indices' }), 'a%5B0%5D=b&a%5B1%5D=c'); + expect(stringify({ a: ['b', 'c'] }, { arrayFormat: 'indices' })).toBe('a%5B0%5D=b&a%5B1%5D=c'); + }); + + test('uses repeat notation for arrays when arrayFormat=repeat', function () { + // st.equal(stringify({ a: ['b', 'c'] }, { arrayFormat: 'repeat' }), 'a=b&a=c'); + expect(stringify({ a: ['b', 'c'] }, { arrayFormat: 'repeat' })).toBe('a=b&a=c'); + }); + + test('uses brackets notation for arrays when arrayFormat=brackets', function () { + // st.equal(stringify({ a: ['b', 'c'] }, { arrayFormat: 'brackets' }), 'a%5B%5D=b&a%5B%5D=c'); + expect(stringify({ a: ['b', 'c'] }, { arrayFormat: 'brackets' })).toBe('a%5B%5D=b&a%5B%5D=c'); + }); + + test('stringifies a complicated object', function () { + // st.equal(stringify({ a: { b: 'c', d: 'e' } }), 'a%5Bb%5D=c&a%5Bd%5D=e'); + expect(stringify({ a: { b: 'c', d: 'e' } })).toBe('a%5Bb%5D=c&a%5Bd%5D=e'); + }); + + test('stringifies an empty value', function () { + // st.equal(stringify({ a: '' }), 'a='); + // st.equal(stringify({ a: null }, { strictNullHandling: true }), 'a'); + expect(stringify({ a: '' })).toBe('a='); + expect(stringify({ a: null }, { strictNullHandling: true })).toBe('a'); + + // st.equal(stringify({ a: '', b: '' }), 'a=&b='); + // st.equal(stringify({ a: null, b: '' }, { strictNullHandling: true }), 'a&b='); + expect(stringify({ a: '', b: '' })).toBe('a=&b='); + expect(stringify({ a: null, b: '' }, { strictNullHandling: true })).toBe('a&b='); + + // st.equal(stringify({ a: { b: '' } }), 'a%5Bb%5D='); + // st.equal(stringify({ a: { b: null } }, { strictNullHandling: true }), 'a%5Bb%5D'); + // st.equal(stringify({ a: { b: null } }, { strictNullHandling: false }), 'a%5Bb%5D='); + expect(stringify({ a: { b: '' } })).toBe('a%5Bb%5D='); + expect(stringify({ a: { b: null } }, { strictNullHandling: true })).toBe('a%5Bb%5D'); + expect(stringify({ a: { b: 
null } }, { strictNullHandling: false })).toBe('a%5Bb%5D='); + }); + + test('stringifies an empty array in different arrayFormat', function () { + // st.equal(stringify({ a: [], b: [null], c: 'c' }, { encode: false }), 'b[0]=&c=c'); + expect(stringify({ a: [], b: [null], c: 'c' }, { encode: false })).toBe('b[0]=&c=c'); + // arrayFormat default + // st.equal( + // stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'indices' }), + // 'b[0]=&c=c', + // ); + // st.equal( + // stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'brackets' }), + // 'b[]=&c=c', + // ); + // st.equal( + // stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'repeat' }), + // 'b=&c=c', + // ); + // st.equal( + // stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'comma' }), + // 'b=&c=c', + // ); + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'comma', commaRoundTrip: true }, + // ), + // 'b[]=&c=c', + // ); + expect(stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'indices' })).toBe( + 'b[0]=&c=c', + ); + expect(stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'brackets' })).toBe( + 'b[]=&c=c', + ); + expect(stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'repeat' })).toBe('b=&c=c'); + expect(stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'comma' })).toBe('b=&c=c'); + expect( + stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'comma', commaRoundTrip: true }), + ).toBe('b[]=&c=c'); + + // with strictNullHandling + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'indices', strictNullHandling: true }, + // ), + // 'b[0]&c=c', + // ); + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'brackets', strictNullHandling: true }, + // ), + // 'b[]&c=c', + // ); + // 
st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'repeat', strictNullHandling: true }, + // ), + // 'b&c=c', + // ); + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'comma', strictNullHandling: true }, + // ), + // 'b&c=c', + // ); + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'comma', strictNullHandling: true, commaRoundTrip: true }, + // ), + // 'b[]&c=c', + // ); + + expect( + stringify( + { a: [], b: [null], c: 'c' }, + { encode: false, arrayFormat: 'indices', strictNullHandling: true }, + ), + ).toBe('b[0]&c=c'); + expect( + stringify( + { a: [], b: [null], c: 'c' }, + { encode: false, arrayFormat: 'brackets', strictNullHandling: true }, + ), + ).toBe('b[]&c=c'); + expect( + stringify( + { a: [], b: [null], c: 'c' }, + { encode: false, arrayFormat: 'repeat', strictNullHandling: true }, + ), + ).toBe('b&c=c'); + expect( + stringify( + { a: [], b: [null], c: 'c' }, + { encode: false, arrayFormat: 'comma', strictNullHandling: true }, + ), + ).toBe('b&c=c'); + expect( + stringify( + { a: [], b: [null], c: 'c' }, + { encode: false, arrayFormat: 'comma', strictNullHandling: true, commaRoundTrip: true }, + ), + ).toBe('b[]&c=c'); + + // with skipNulls + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'indices', skipNulls: true }, + // ), + // 'c=c', + // ); + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'brackets', skipNulls: true }, + // ), + // 'c=c', + // ); + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'repeat', skipNulls: true }, + // ), + // 'c=c', + // ); + // st.equal( + // stringify( + // { a: [], b: [null], c: 'c' }, + // { encode: false, arrayFormat: 'comma', skipNulls: true }, + // ), + // 'c=c', + // ); + expect( + stringify({ a: [], b: [null], 
c: 'c' }, { encode: false, arrayFormat: 'indices', skipNulls: true }), + ).toBe('c=c'); + expect( + stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'brackets', skipNulls: true }), + ).toBe('c=c'); + expect( + stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'repeat', skipNulls: true }), + ).toBe('c=c'); + expect( + stringify({ a: [], b: [null], c: 'c' }, { encode: false, arrayFormat: 'comma', skipNulls: true }), + ).toBe('c=c'); + }); + + test('stringifies a null object', function () { + var obj = Object.create(null); + obj.a = 'b'; + // st.equal(stringify(obj), 'a=b'); + expect(stringify(obj)).toBe('a=b'); + }); + + test('returns an empty string for invalid input', function () { + // st.equal(stringify(undefined), ''); + // st.equal(stringify(false), ''); + // st.equal(stringify(null), ''); + // st.equal(stringify(''), ''); + expect(stringify(undefined)).toBe(''); + expect(stringify(false)).toBe(''); + expect(stringify(null)).toBe(''); + expect(stringify('')).toBe(''); + }); + + test('stringifies an object with a null object as a child', function () { + var obj = { a: Object.create(null) }; + + obj.a.b = 'c'; + // st.equal(stringify(obj), 'a%5Bb%5D=c'); + expect(stringify(obj)).toBe('a%5Bb%5D=c'); + }); + + test('drops keys with a value of undefined', function () { + // st.equal(stringify({ a: undefined }), ''); + expect(stringify({ a: undefined })).toBe(''); + + // st.equal( + // stringify({ a: { b: undefined, c: null } }, { strictNullHandling: true }), + // 'a%5Bc%5D', + // ); + // st.equal( + // stringify({ a: { b: undefined, c: null } }, { strictNullHandling: false }), + // 'a%5Bc%5D=', + // ); + // st.equal(stringify({ a: { b: undefined, c: '' } }), 'a%5Bc%5D='); + expect(stringify({ a: { b: undefined, c: null } }, { strictNullHandling: true })).toBe('a%5Bc%5D'); + expect(stringify({ a: { b: undefined, c: null } }, { strictNullHandling: false })).toBe('a%5Bc%5D='); + expect(stringify({ a: { b: undefined, c: '' } 
})).toBe('a%5Bc%5D='); + }); + + test('url encodes values', function () { + // st.equal(stringify({ a: 'b c' }), 'a=b%20c'); + expect(stringify({ a: 'b c' })).toBe('a=b%20c'); + }); + + test('stringifies a date', function () { + var now = new Date(); + var str = 'a=' + encodeURIComponent(now.toISOString()); + // st.equal(stringify({ a: now }), str); + expect(stringify({ a: now })).toBe(str); + }); + + test('stringifies the weird object from qs', function () { + // st.equal( + // stringify({ 'my weird field': '~q1!2"\'w$5&7/z8)?' }), + // 'my%20weird%20field=~q1%212%22%27w%245%267%2Fz8%29%3F', + // ); + expect(stringify({ 'my weird field': '~q1!2"\'w$5&7/z8)?' })).toBe( + 'my%20weird%20field=~q1%212%22%27w%245%267%2Fz8%29%3F', + ); + }); + + // TODO: Investigate how to to intercept in vitest + // TODO(rob) + test('skips properties that are part of the object prototype', function () { + // st.intercept(Object.prototype, 'crash', { value: 'test' }); + // @ts-expect-error + Object.prototype.crash = 'test'; + + // st.equal(stringify({ a: 'b' }), 'a=b'); + // st.equal(stringify({ a: { b: 'c' } }), 'a%5Bb%5D=c'); + expect(stringify({ a: 'b' })).toBe('a=b'); + expect(stringify({ a: { b: 'c' } })).toBe('a%5Bb%5D=c'); + }); + + test('stringifies boolean values', function () { + // st.equal(stringify({ a: true }), 'a=true'); + // st.equal(stringify({ a: { b: true } }), 'a%5Bb%5D=true'); + // st.equal(stringify({ b: false }), 'b=false'); + // st.equal(stringify({ b: { c: false } }), 'b%5Bc%5D=false'); + expect(stringify({ a: true })).toBe('a=true'); + expect(stringify({ a: { b: true } })).toBe('a%5Bb%5D=true'); + expect(stringify({ b: false })).toBe('b=false'); + expect(stringify({ b: { c: false } })).toBe('b%5Bc%5D=false'); + }); + + test('stringifies buffer values', function () { + // st.equal(stringify({ a: Buffer.from('test') }), 'a=test'); + // st.equal(stringify({ a: { b: Buffer.from('test') } }), 'a%5Bb%5D=test'); + }); + + test('stringifies an object using an 
alternative delimiter', function () { + // st.equal(stringify({ a: 'b', c: 'd' }, { delimiter: ';' }), 'a=b;c=d'); + expect(stringify({ a: 'b', c: 'd' }, { delimiter: ';' })).toBe('a=b;c=d'); + }); + + // We dont target environments which dont even have Buffer + // test('does not blow up when Buffer global is missing', function () { + // var restore = mockProperty(global, 'Buffer', { delete: true }); + + // var result = stringify({ a: 'b', c: 'd' }); + + // restore(); + + // st.equal(result, 'a=b&c=d'); + // st.end(); + // }); + + test('does not crash when parsing circular references', function () { + var a: any = {}; + a.b = a; + + // st['throws']( + // function () { + // stringify({ 'foo[bar]': 'baz', 'foo[baz]': a }); + // }, + // /RangeError: Cyclic object value/, + // 'cyclic values throw', + // ); + expect(() => { + stringify({ 'foo[bar]': 'baz', 'foo[baz]': a }); + }).toThrow('Cyclic object value'); + + var circular: any = { + a: 'value', + }; + circular.a = circular; + // st['throws']( + // function () { + // stringify(circular); + // }, + // /RangeError: Cyclic object value/, + // 'cyclic values throw', + // ); + expect(() => { + stringify(circular); + }).toThrow('Cyclic object value'); + + var arr = ['a']; + // st.doesNotThrow(function () { + // stringify({ x: arr, y: arr }); + // }, 'non-cyclic values do not throw'); + expect(() => { + stringify({ x: arr, y: arr }); + }).not.toThrow(); + }); + + test('non-circular duplicated references can still work', function () { + var hourOfDay = { + function: 'hour_of_day', + }; + + var p1 = { + function: 'gte', + arguments: [hourOfDay, 0], + }; + var p2 = { + function: 'lte', + arguments: [hourOfDay, 23], + }; + + // st.equal( + // stringify( + // { filters: { $and: [p1, p2] } }, + // { encodeValuesOnly: true, arrayFormat: 'indices' }, + // ), + // 
'filters[$and][0][function]=gte&filters[$and][0][arguments][0][function]=hour_of_day&filters[$and][0][arguments][1]=0&filters[$and][1][function]=lte&filters[$and][1][arguments][0][function]=hour_of_day&filters[$and][1][arguments][1]=23', + // ); + // st.equal( + // stringify( + // { filters: { $and: [p1, p2] } }, + // { encodeValuesOnly: true, arrayFormat: 'brackets' }, + // ), + // 'filters[$and][][function]=gte&filters[$and][][arguments][][function]=hour_of_day&filters[$and][][arguments][]=0&filters[$and][][function]=lte&filters[$and][][arguments][][function]=hour_of_day&filters[$and][][arguments][]=23', + // ); + // st.equal( + // stringify( + // { filters: { $and: [p1, p2] } }, + // { encodeValuesOnly: true, arrayFormat: 'repeat' }, + // ), + // 'filters[$and][function]=gte&filters[$and][arguments][function]=hour_of_day&filters[$and][arguments]=0&filters[$and][function]=lte&filters[$and][arguments][function]=hour_of_day&filters[$and][arguments]=23', + // ); + expect( + stringify({ filters: { $and: [p1, p2] } }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + ).toBe( + 'filters[$and][0][function]=gte&filters[$and][0][arguments][0][function]=hour_of_day&filters[$and][0][arguments][1]=0&filters[$and][1][function]=lte&filters[$and][1][arguments][0][function]=hour_of_day&filters[$and][1][arguments][1]=23', + ); + expect( + stringify({ filters: { $and: [p1, p2] } }, { encodeValuesOnly: true, arrayFormat: 'brackets' }), + ).toBe( + 'filters[$and][][function]=gte&filters[$and][][arguments][][function]=hour_of_day&filters[$and][][arguments][]=0&filters[$and][][function]=lte&filters[$and][][arguments][][function]=hour_of_day&filters[$and][][arguments][]=23', + ); + expect( + stringify({ filters: { $and: [p1, p2] } }, { encodeValuesOnly: true, arrayFormat: 'repeat' }), + ).toBe( + 
'filters[$and][function]=gte&filters[$and][arguments][function]=hour_of_day&filters[$and][arguments]=0&filters[$and][function]=lte&filters[$and][arguments][function]=hour_of_day&filters[$and][arguments]=23', + ); + }); + + test('selects properties when filter=array', function () { + // st.equal(stringify({ a: 'b' }, { filter: ['a'] }), 'a=b'); + // st.equal(stringify({ a: 1 }, { filter: [] }), ''); + expect(stringify({ a: 'b' }, { filter: ['a'] })).toBe('a=b'); + expect(stringify({ a: 1 }, { filter: [] })).toBe(''); + + // st.equal( + // stringify( + // { a: { b: [1, 2, 3, 4], c: 'd' }, c: 'f' }, + // { filter: ['a', 'b', 0, 2], arrayFormat: 'indices' }, + // ), + // 'a%5Bb%5D%5B0%5D=1&a%5Bb%5D%5B2%5D=3', + // 'indices => indices', + // ); + // st.equal( + // stringify( + // { a: { b: [1, 2, 3, 4], c: 'd' }, c: 'f' }, + // { filter: ['a', 'b', 0, 2], arrayFormat: 'brackets' }, + // ), + // 'a%5Bb%5D%5B%5D=1&a%5Bb%5D%5B%5D=3', + // 'brackets => brackets', + // ); + // st.equal( + // stringify({ a: { b: [1, 2, 3, 4], c: 'd' }, c: 'f' }, { filter: ['a', 'b', 0, 2] }), + // 'a%5Bb%5D%5B0%5D=1&a%5Bb%5D%5B2%5D=3', + // 'default => indices', + // ); + expect(stringify({ a: { b: [1, 2, 3, 4], c: 'd' }, c: 'f' }, { filter: ['a', 'b', 0, 2] })).toBe( + 'a%5Bb%5D%5B0%5D=1&a%5Bb%5D%5B2%5D=3', + ); + expect( + stringify( + { a: { b: [1, 2, 3, 4], c: 'd' }, c: 'f' }, + { filter: ['a', 'b', 0, 2], arrayFormat: 'indices' }, + ), + ).toBe('a%5Bb%5D%5B0%5D=1&a%5Bb%5D%5B2%5D=3'); + expect( + stringify( + { a: { b: [1, 2, 3, 4], c: 'd' }, c: 'f' }, + { filter: ['a', 'b', 0, 2], arrayFormat: 'brackets' }, + ), + ).toBe('a%5Bb%5D%5B%5D=1&a%5Bb%5D%5B%5D=3'); + }); + + test('supports custom representations when filter=function', function () { + var calls = 0; + var obj = { a: 'b', c: 'd', e: { f: new Date(1257894000000) } }; + var filterFunc: StringifyOptions['filter'] = function (prefix, value) { + calls += 1; + if (calls === 1) { + // st.equal(prefix, '', 'prefix is empty'); + // 
st.equal(value, obj); + expect(prefix).toBe(''); + expect(value).toBe(obj); + } else if (prefix === 'c') { + return void 0; + } else if (value instanceof Date) { + // st.equal(prefix, 'e[f]'); + expect(prefix).toBe('e[f]'); + return value.getTime(); + } + return value; + }; + + // st.equal(stringify(obj, { filter: filterFunc }), 'a=b&e%5Bf%5D=1257894000000'); + // st.equal(calls, 5); + expect(stringify(obj, { filter: filterFunc })).toBe('a=b&e%5Bf%5D=1257894000000'); + expect(calls).toBe(5); + }); + + test('can disable uri encoding', function () { + // st.equal(stringify({ a: 'b' }, { encode: false }), 'a=b'); + // st.equal(stringify({ a: { b: 'c' } }, { encode: false }), 'a[b]=c'); + // st.equal( + // stringify({ a: 'b', c: null }, { strictNullHandling: true, encode: false }), + // 'a=b&c', + // ); + expect(stringify({ a: 'b' }, { encode: false })).toBe('a=b'); + expect(stringify({ a: { b: 'c' } }, { encode: false })).toBe('a[b]=c'); + expect(stringify({ a: 'b', c: null }, { strictNullHandling: true, encode: false })).toBe('a=b&c'); + }); + + test('can sort the keys', function () { + // @ts-expect-error + var sort: NonNullable = function (a: string, b: string) { + return a.localeCompare(b); + }; + // st.equal(stringify({ a: 'c', z: 'y', b: 'f' }, { sort: sort }), 'a=c&b=f&z=y'); + // st.equal( + // stringify({ a: 'c', z: { j: 'a', i: 'b' }, b: 'f' }, { sort: sort }), + // 'a=c&b=f&z%5Bi%5D=b&z%5Bj%5D=a', + // ); + expect(stringify({ a: 'c', z: 'y', b: 'f' }, { sort: sort })).toBe('a=c&b=f&z=y'); + expect(stringify({ a: 'c', z: { j: 'a', i: 'b' }, b: 'f' }, { sort: sort })).toBe( + 'a=c&b=f&z%5Bi%5D=b&z%5Bj%5D=a', + ); + }); + + test('can sort the keys at depth 3 or more too', function () { + // @ts-expect-error + var sort: NonNullable = function (a: string, b: string) { + return a.localeCompare(b); + }; + // st.equal( + // stringify( + // { a: 'a', z: { zj: { zjb: 'zjb', zja: 'zja' }, zi: { zib: 'zib', zia: 'zia' } }, b: 'b' }, + // { sort: sort, encode: false }, 
+ // ), + // 'a=a&b=b&z[zi][zia]=zia&z[zi][zib]=zib&z[zj][zja]=zja&z[zj][zjb]=zjb', + // ); + // st.equal( + // stringify( + // { a: 'a', z: { zj: { zjb: 'zjb', zja: 'zja' }, zi: { zib: 'zib', zia: 'zia' } }, b: 'b' }, + // { sort: null, encode: false }, + // ), + // 'a=a&z[zj][zjb]=zjb&z[zj][zja]=zja&z[zi][zib]=zib&z[zi][zia]=zia&b=b', + // ); + expect( + stringify( + { a: 'a', z: { zj: { zjb: 'zjb', zja: 'zja' }, zi: { zib: 'zib', zia: 'zia' } }, b: 'b' }, + { sort: sort, encode: false }, + ), + ).toBe('a=a&b=b&z[zi][zia]=zia&z[zi][zib]=zib&z[zj][zja]=zja&z[zj][zjb]=zjb'); + expect( + stringify( + { a: 'a', z: { zj: { zjb: 'zjb', zja: 'zja' }, zi: { zib: 'zib', zia: 'zia' } }, b: 'b' }, + { sort: null, encode: false }, + ), + ).toBe('a=a&z[zj][zjb]=zjb&z[zj][zja]=zja&z[zi][zib]=zib&z[zi][zia]=zia&b=b'); + }); + + test('can stringify with custom encoding', function () { + // st.equal( + // stringify( + // { 県: '大阪府', '': '' }, + // { + // encoder: function (str) { + // if (str.length === 0) { + // return ''; + // } + // var buf = iconv.encode(str, 'shiftjis'); + // var result = []; + // for (var i = 0; i < buf.length; ++i) { + // result.push(buf.readUInt8(i).toString(16)); + // } + // return '%' + result.join('%'); + // }, + // }, + // ), + // '%8c%a7=%91%e5%8d%e3%95%7b&=', + // ); + expect( + stringify( + { 県: '大阪府', '': '' }, + { + encoder: function (str) { + if (str.length === 0) { + return ''; + } + var buf = iconv.encode(str, 'shiftjis'); + var result = []; + for (var i = 0; i < buf.length; ++i) { + result.push(buf.readUInt8(i).toString(16)); + } + return '%' + result.join('%'); + }, + }, + ), + ).toBe('%8c%a7=%91%e5%8d%e3%95%7b&='); + }); + + test('receives the default encoder as a second argument', function () { + // stringify( + // { a: 1, b: new Date(), c: true, d: [1] }, + // { + // encoder: function (str) { + // st.match(typeof str, /^(?:string|number|boolean)$/); + // return ''; + // }, + // }, + // ); + + stringify( + { a: 1, b: new Date(), c: true, 
d: [1] }, + { + encoder: function (str) { + // st.match(typeof str, /^(?:string|number|boolean)$/); + assert.match(typeof str, /^(?:string|number|boolean)$/); + return ''; + }, + }, + ); + }); + + test('receives the default encoder as a second argument', function () { + // stringify( + // { a: 1 }, + // { + // encoder: function (str, defaultEncoder) { + // st.equal(defaultEncoder, utils.encode); + // }, + // }, + // ); + + stringify( + { a: 1 }, + { + // @ts-ignore + encoder: function (_str, defaultEncoder) { + expect(defaultEncoder).toBe(encode); + }, + }, + ); + }); + + test('throws error with wrong encoder', function () { + // st['throws'](function () { + // stringify({}, { encoder: 'string' }); + // }, new TypeError('Encoder has to be a function.')); + // st.end(); + expect(() => { + // @ts-expect-error + stringify({}, { encoder: 'string' }); + }).toThrow(TypeError); + }); + + (typeof Buffer === 'undefined' ? test.skip : test)( + 'can use custom encoder for a buffer object', + function () { + // st.equal( + // stringify( + // { a: Buffer.from([1]) }, + // { + // encoder: function (buffer) { + // if (typeof buffer === 'string') { + // return buffer; + // } + // return String.fromCharCode(buffer.readUInt8(0) + 97); + // }, + // }, + // ), + // 'a=b', + // ); + expect( + stringify( + { a: Buffer.from([1]) }, + { + encoder: function (buffer) { + if (typeof buffer === 'string') { + return buffer; + } + return String.fromCharCode(buffer.readUInt8(0) + 97); + }, + }, + ), + ).toBe('a=b'); + + // st.equal( + // stringify( + // { a: Buffer.from('a b') }, + // { + // encoder: function (buffer) { + // return buffer; + // }, + // }, + // ), + // 'a=a b', + // ); + expect( + stringify( + { a: Buffer.from('a b') }, + { + encoder: function (buffer) { + return buffer; + }, + }, + ), + ).toBe('a=a b'); + }, + ); + + test('serializeDate option', function () { + var date = new Date(); + // st.equal( + // stringify({ a: date }), + // 'a=' + date.toISOString().replace(/:/g, '%3A'), 
+ // 'default is toISOString', + // ); + expect(stringify({ a: date })).toBe('a=' + date.toISOString().replace(/:/g, '%3A')); + + var mutatedDate = new Date(); + mutatedDate.toISOString = function () { + throw new SyntaxError(); + }; + // st['throws'](function () { + // mutatedDate.toISOString(); + // }, SyntaxError); + expect(() => { + mutatedDate.toISOString(); + }).toThrow(SyntaxError); + // st.equal( + // stringify({ a: mutatedDate }), + // 'a=' + Date.prototype.toISOString.call(mutatedDate).replace(/:/g, '%3A'), + // 'toISOString works even when method is not locally present', + // ); + expect(stringify({ a: mutatedDate })).toBe( + 'a=' + Date.prototype.toISOString.call(mutatedDate).replace(/:/g, '%3A'), + ); + + var specificDate = new Date(6); + // st.equal( + // stringify( + // { a: specificDate }, + // { + // serializeDate: function (d) { + // return d.getTime() * 7; + // }, + // }, + // ), + // 'a=42', + // 'custom serializeDate function called', + // ); + expect( + stringify( + { a: specificDate }, + { + // @ts-ignore + serializeDate: function (d) { + return d.getTime() * 7; + }, + }, + ), + ).toBe('a=42'); + + // st.equal( + // stringify( + // { a: [date] }, + // { + // serializeDate: function (d) { + // return d.getTime(); + // }, + // arrayFormat: 'comma', + // }, + // ), + // 'a=' + date.getTime(), + // 'works with arrayFormat comma', + // ); + // st.equal( + // stringify( + // { a: [date] }, + // { + // serializeDate: function (d) { + // return d.getTime(); + // }, + // arrayFormat: 'comma', + // commaRoundTrip: true, + // }, + // ), + // 'a%5B%5D=' + date.getTime(), + // 'works with arrayFormat comma', + // ); + expect( + stringify( + { a: [date] }, + { + // @ts-expect-error + serializeDate: function (d) { + return d.getTime(); + }, + arrayFormat: 'comma', + }, + ), + ).toBe('a=' + date.getTime()); + expect( + stringify( + { a: [date] }, + { + // @ts-expect-error + serializeDate: function (d) { + return d.getTime(); + }, + arrayFormat: 'comma', + 
commaRoundTrip: true, + }, + ), + ).toBe('a%5B%5D=' + date.getTime()); + }); + + test('RFC 1738 serialization', function () { + // st.equal(stringify({ a: 'b c' }, { format: formats.RFC1738 }), 'a=b+c'); + // st.equal(stringify({ 'a b': 'c d' }, { format: formats.RFC1738 }), 'a+b=c+d'); + // st.equal( + // stringify({ 'a b': Buffer.from('a b') }, { format: formats.RFC1738 }), + // 'a+b=a+b', + // ); + expect(stringify({ a: 'b c' }, { format: 'RFC1738' })).toBe('a=b+c'); + expect(stringify({ 'a b': 'c d' }, { format: 'RFC1738' })).toBe('a+b=c+d'); + expect(stringify({ 'a b': Buffer.from('a b') }, { format: 'RFC1738' })).toBe('a+b=a+b'); + + // st.equal(stringify({ 'foo(ref)': 'bar' }, { format: formats.RFC1738 }), 'foo(ref)=bar'); + expect(stringify({ 'foo(ref)': 'bar' }, { format: 'RFC1738' })).toBe('foo(ref)=bar'); + }); + + test('RFC 3986 spaces serialization', function () { + // st.equal(stringify({ a: 'b c' }, { format: formats.RFC3986 }), 'a=b%20c'); + // st.equal(stringify({ 'a b': 'c d' }, { format: formats.RFC3986 }), 'a%20b=c%20d'); + // st.equal( + // stringify({ 'a b': Buffer.from('a b') }, { format: formats.RFC3986 }), + // 'a%20b=a%20b', + // ); + expect(stringify({ a: 'b c' }, { format: 'RFC3986' })).toBe('a=b%20c'); + expect(stringify({ 'a b': 'c d' }, { format: 'RFC3986' })).toBe('a%20b=c%20d'); + expect(stringify({ 'a b': Buffer.from('a b') }, { format: 'RFC3986' })).toBe('a%20b=a%20b'); + }); + + test('Backward compatibility to RFC 3986', function () { + // st.equal(stringify({ a: 'b c' }), 'a=b%20c'); + // st.equal(stringify({ 'a b': Buffer.from('a b') }), 'a%20b=a%20b'); + expect(stringify({ a: 'b c' })).toBe('a=b%20c'); + expect(stringify({ 'a b': Buffer.from('a b') })).toBe('a%20b=a%20b'); + }); + + test('Edge cases and unknown formats', function () { + ['UFO1234', false, 1234, null, {}, []].forEach(function (format) { + // st['throws'](function () { + // stringify({ a: 'b c' }, { format: format }); + // }, new TypeError('Unknown format option 
provided.')); + expect(() => { + // @ts-expect-error + stringify({ a: 'b c' }, { format: format }); + }).toThrow(TypeError); + }); + }); + + test('encodeValuesOnly', function () { + // st.equal( + // stringify( + // { a: 'b', c: ['d', 'e=f'], f: [['g'], ['h']] }, + // { encodeValuesOnly: true, arrayFormat: 'indices' }, + // ), + // 'a=b&c[0]=d&c[1]=e%3Df&f[0][0]=g&f[1][0]=h', + // 'encodeValuesOnly + indices', + // ); + // st.equal( + // stringify( + // { a: 'b', c: ['d', 'e=f'], f: [['g'], ['h']] }, + // { encodeValuesOnly: true, arrayFormat: 'brackets' }, + // ), + // 'a=b&c[]=d&c[]=e%3Df&f[][]=g&f[][]=h', + // 'encodeValuesOnly + brackets', + // ); + // st.equal( + // stringify( + // { a: 'b', c: ['d', 'e=f'], f: [['g'], ['h']] }, + // { encodeValuesOnly: true, arrayFormat: 'repeat' }, + // ), + // 'a=b&c=d&c=e%3Df&f=g&f=h', + // 'encodeValuesOnly + repeat', + // ); + expect( + stringify( + { a: 'b', c: ['d', 'e=f'], f: [['g'], ['h']] }, + { encodeValuesOnly: true, arrayFormat: 'indices' }, + ), + ).toBe('a=b&c[0]=d&c[1]=e%3Df&f[0][0]=g&f[1][0]=h'); + expect( + stringify( + { a: 'b', c: ['d', 'e=f'], f: [['g'], ['h']] }, + { encodeValuesOnly: true, arrayFormat: 'brackets' }, + ), + ).toBe('a=b&c[]=d&c[]=e%3Df&f[][]=g&f[][]=h'); + expect( + stringify( + { a: 'b', c: ['d', 'e=f'], f: [['g'], ['h']] }, + { encodeValuesOnly: true, arrayFormat: 'repeat' }, + ), + ).toBe('a=b&c=d&c=e%3Df&f=g&f=h'); + + // st.equal( + // stringify({ a: 'b', c: ['d', 'e'], f: [['g'], ['h']] }, { arrayFormat: 'indices' }), + // 'a=b&c%5B0%5D=d&c%5B1%5D=e&f%5B0%5D%5B0%5D=g&f%5B1%5D%5B0%5D=h', + // 'no encodeValuesOnly + indices', + // ); + // st.equal( + // stringify({ a: 'b', c: ['d', 'e'], f: [['g'], ['h']] }, { arrayFormat: 'brackets' }), + // 'a=b&c%5B%5D=d&c%5B%5D=e&f%5B%5D%5B%5D=g&f%5B%5D%5B%5D=h', + // 'no encodeValuesOnly + brackets', + // ); + // st.equal( + // stringify({ a: 'b', c: ['d', 'e'], f: [['g'], ['h']] }, { arrayFormat: 'repeat' }), + // 'a=b&c=d&c=e&f=g&f=h', + // 'no 
encodeValuesOnly + repeat', + // ); + expect(stringify({ a: 'b', c: ['d', 'e'], f: [['g'], ['h']] }, { arrayFormat: 'indices' })).toBe( + 'a=b&c%5B0%5D=d&c%5B1%5D=e&f%5B0%5D%5B0%5D=g&f%5B1%5D%5B0%5D=h', + ); + expect(stringify({ a: 'b', c: ['d', 'e'], f: [['g'], ['h']] }, { arrayFormat: 'brackets' })).toBe( + 'a=b&c%5B%5D=d&c%5B%5D=e&f%5B%5D%5B%5D=g&f%5B%5D%5B%5D=h', + ); + expect(stringify({ a: 'b', c: ['d', 'e'], f: [['g'], ['h']] }, { arrayFormat: 'repeat' })).toBe( + 'a=b&c=d&c=e&f=g&f=h', + ); + }); + + test('encodeValuesOnly - strictNullHandling', function () { + // st.equal( + // stringify({ a: { b: null } }, { encodeValuesOnly: true, strictNullHandling: true }), + // 'a[b]', + // ); + expect(stringify({ a: { b: null } }, { encodeValuesOnly: true, strictNullHandling: true })).toBe('a[b]'); + }); + + test('throws if an invalid charset is specified', function () { + // st['throws'](function () { + // stringify({ a: 'b' }, { charset: 'foobar' }); + // }, new TypeError('The charset option must be either utf-8, iso-8859-1, or undefined')); + expect(() => { + // @ts-expect-error + stringify({ a: 'b' }, { charset: 'foobar' }); + }).toThrow(TypeError); + }); + + test('respects a charset of iso-8859-1', function () { + // st.equal(stringify({ æ: 'æ' }, { charset: 'iso-8859-1' }), '%E6=%E6'); + expect(stringify({ æ: 'æ' }, { charset: 'iso-8859-1' })).toBe('%E6=%E6'); + }); + + test('encodes unrepresentable chars as numeric entities in iso-8859-1 mode', function () { + // st.equal(stringify({ a: '☺' }, { charset: 'iso-8859-1' }), 'a=%26%239786%3B'); + expect(stringify({ a: '☺' }, { charset: 'iso-8859-1' })).toBe('a=%26%239786%3B'); + }); + + test('respects an explicit charset of utf-8 (the default)', function () { + // st.equal(stringify({ a: 'æ' }, { charset: 'utf-8' }), 'a=%C3%A6'); + expect(stringify({ a: 'æ' }, { charset: 'utf-8' })).toBe('a=%C3%A6'); + }); + + test('`charsetSentinel` option', function () { + // st.equal( + // stringify({ a: 'æ' }, { 
charsetSentinel: true, charset: 'utf-8' }), + // 'utf8=%E2%9C%93&a=%C3%A6', + // 'adds the right sentinel when instructed to and the charset is utf-8', + // ); + expect(stringify({ a: 'æ' }, { charsetSentinel: true, charset: 'utf-8' })).toBe( + 'utf8=%E2%9C%93&a=%C3%A6', + ); + + // st.equal( + // stringify({ a: 'æ' }, { charsetSentinel: true, charset: 'iso-8859-1' }), + // 'utf8=%26%2310003%3B&a=%E6', + // 'adds the right sentinel when instructed to and the charset is iso-8859-1', + // ); + expect(stringify({ a: 'æ' }, { charsetSentinel: true, charset: 'iso-8859-1' })).toBe( + 'utf8=%26%2310003%3B&a=%E6', + ); + }); + + test('does not mutate the options argument', function () { + var options = {}; + stringify({}, options); + // st.deepEqual(options, {}); + expect(options).toEqual({}); + }); + + test('strictNullHandling works with custom filter', function () { + // @ts-expect-error + var filter = function (_prefix, value) { + return value; + }; + + var options = { strictNullHandling: true, filter: filter }; + // st.equal(stringify({ key: null }, options), 'key'); + expect(stringify({ key: null }, options)).toBe('key'); + }); + + test('strictNullHandling works with null serializeDate', function () { + var serializeDate = function () { + return null; + }; + var options = { strictNullHandling: true, serializeDate: serializeDate }; + var date = new Date(); + // st.equal(stringify({ key: date }, options), 'key'); + // @ts-expect-error + expect(stringify({ key: date }, options)).toBe('key'); + }); + + test('allows for encoding keys and values differently', function () { + // @ts-expect-error + var encoder = function (str, defaultEncoder, charset, type) { + if (type === 'key') { + return defaultEncoder(str, defaultEncoder, charset, type).toLowerCase(); + } + if (type === 'value') { + return defaultEncoder(str, defaultEncoder, charset, type).toUpperCase(); + } + throw 'this should never happen! 
type: ' + type; + }; + + // st.deepEqual(stringify({ KeY: 'vAlUe' }, { encoder: encoder }), 'key=VALUE'); + expect(stringify({ KeY: 'vAlUe' }, { encoder: encoder })).toBe('key=VALUE'); + }); + + test('objects inside arrays', function () { + var obj = { a: { b: { c: 'd', e: 'f' } } }; + var withArray = { a: { b: [{ c: 'd', e: 'f' }] } }; + + // st.equal( + // stringify(obj, { encode: false }), + // 'a[b][c]=d&a[b][e]=f', + // 'no array, no arrayFormat', + // ); + // st.equal( + // stringify(obj, { encode: false, arrayFormat: 'brackets' }), + // 'a[b][c]=d&a[b][e]=f', + // 'no array, bracket', + // ); + // st.equal( + // stringify(obj, { encode: false, arrayFormat: 'indices' }), + // 'a[b][c]=d&a[b][e]=f', + // 'no array, indices', + // ); + // st.equal( + // stringify(obj, { encode: false, arrayFormat: 'repeat' }), + // 'a[b][c]=d&a[b][e]=f', + // 'no array, repeat', + // ); + // st.equal( + // stringify(obj, { encode: false, arrayFormat: 'comma' }), + // 'a[b][c]=d&a[b][e]=f', + // 'no array, comma', + // ); + expect(stringify(obj, { encode: false })).toBe('a[b][c]=d&a[b][e]=f'); + expect(stringify(obj, { encode: false, arrayFormat: 'brackets' })).toBe('a[b][c]=d&a[b][e]=f'); + expect(stringify(obj, { encode: false, arrayFormat: 'indices' })).toBe('a[b][c]=d&a[b][e]=f'); + expect(stringify(obj, { encode: false, arrayFormat: 'repeat' })).toBe('a[b][c]=d&a[b][e]=f'); + expect(stringify(obj, { encode: false, arrayFormat: 'comma' })).toBe('a[b][c]=d&a[b][e]=f'); + + // st.equal( + // stringify(withArray, { encode: false }), + // 'a[b][0][c]=d&a[b][0][e]=f', + // 'array, no arrayFormat', + // ); + // st.equal( + // stringify(withArray, { encode: false, arrayFormat: 'brackets' }), + // 'a[b][][c]=d&a[b][][e]=f', + // 'array, bracket', + // ); + // st.equal( + // stringify(withArray, { encode: false, arrayFormat: 'indices' }), + // 'a[b][0][c]=d&a[b][0][e]=f', + // 'array, indices', + // ); + // st.equal( + // stringify(withArray, { encode: false, arrayFormat: 'repeat' 
}), + // 'a[b][c]=d&a[b][e]=f', + // 'array, repeat', + // ); + // st.equal( + // stringify(withArray, { encode: false, arrayFormat: 'comma' }), + // '???', + // 'array, comma', + // { skip: 'TODO: figure out what this should do' }, + // ); + expect(stringify(withArray, { encode: false })).toBe('a[b][0][c]=d&a[b][0][e]=f'); + expect(stringify(withArray, { encode: false, arrayFormat: 'brackets' })).toBe('a[b][][c]=d&a[b][][e]=f'); + expect(stringify(withArray, { encode: false, arrayFormat: 'indices' })).toBe('a[b][0][c]=d&a[b][0][e]=f'); + expect(stringify(withArray, { encode: false, arrayFormat: 'repeat' })).toBe('a[b][c]=d&a[b][e]=f'); + // !TODo: Figure out what this should do + // expect(stringify(withArray, { encode: false, arrayFormat: 'comma' })).toBe( + // 'a[b][c]=d&a[b][e]=f', + // ); + }); + + test('stringifies sparse arrays', function () { + // st.equal( + // stringify({ a: [, '2', , , '1'] }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + // 'a[1]=2&a[4]=1', + // ); + // st.equal( + // stringify({ a: [, '2', , , '1'] }, { encodeValuesOnly: true, arrayFormat: 'brackets' }), + // 'a[]=2&a[]=1', + // ); + // st.equal( + // stringify({ a: [, '2', , , '1'] }, { encodeValuesOnly: true, arrayFormat: 'repeat' }), + // 'a=2&a=1', + // ); + expect(stringify({ a: [, '2', , , '1'] }, { encodeValuesOnly: true, arrayFormat: 'indices' })).toBe( + 'a[1]=2&a[4]=1', + ); + expect(stringify({ a: [, '2', , , '1'] }, { encodeValuesOnly: true, arrayFormat: 'brackets' })).toBe( + 'a[]=2&a[]=1', + ); + expect(stringify({ a: [, '2', , , '1'] }, { encodeValuesOnly: true, arrayFormat: 'repeat' })).toBe( + 'a=2&a=1', + ); + + // st.equal( + // stringify( + // { a: [, { b: [, , { c: '1' }] }] }, + // { encodeValuesOnly: true, arrayFormat: 'indices' }, + // ), + // 'a[1][b][2][c]=1', + // ); + // st.equal( + // stringify( + // { a: [, { b: [, , { c: '1' }] }] }, + // { encodeValuesOnly: true, arrayFormat: 'brackets' }, + // ), + // 'a[][b][][c]=1', + // ); + // st.equal( + 
// stringify( + // { a: [, { b: [, , { c: '1' }] }] }, + // { encodeValuesOnly: true, arrayFormat: 'repeat' }, + // ), + // 'a[b][c]=1', + // ); + expect( + stringify({ a: [, { b: [, , { c: '1' }] }] }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + ).toBe('a[1][b][2][c]=1'); + expect( + stringify({ a: [, { b: [, , { c: '1' }] }] }, { encodeValuesOnly: true, arrayFormat: 'brackets' }), + ).toBe('a[][b][][c]=1'); + expect( + stringify({ a: [, { b: [, , { c: '1' }] }] }, { encodeValuesOnly: true, arrayFormat: 'repeat' }), + ).toBe('a[b][c]=1'); + + // st.equal( + // stringify( + // { a: [, [, , [, , , { c: '1' }]]] }, + // { encodeValuesOnly: true, arrayFormat: 'indices' }, + // ), + // 'a[1][2][3][c]=1', + // ); + // st.equal( + // stringify( + // { a: [, [, , [, , , { c: '1' }]]] }, + // { encodeValuesOnly: true, arrayFormat: 'brackets' }, + // ), + // 'a[][][][c]=1', + // ); + // st.equal( + // stringify( + // { a: [, [, , [, , , { c: '1' }]]] }, + // { encodeValuesOnly: true, arrayFormat: 'repeat' }, + // ), + // 'a[c]=1', + // ); + expect( + stringify({ a: [, [, , [, , , { c: '1' }]]] }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + ).toBe('a[1][2][3][c]=1'); + expect( + stringify({ a: [, [, , [, , , { c: '1' }]]] }, { encodeValuesOnly: true, arrayFormat: 'brackets' }), + ).toBe('a[][][][c]=1'); + expect( + stringify({ a: [, [, , [, , , { c: '1' }]]] }, { encodeValuesOnly: true, arrayFormat: 'repeat' }), + ).toBe('a[c]=1'); + + // st.equal( + // stringify( + // { a: [, [, , [, , , { c: [, '1'] }]]] }, + // { encodeValuesOnly: true, arrayFormat: 'indices' }, + // ), + // 'a[1][2][3][c][1]=1', + // ); + // st.equal( + // stringify( + // { a: [, [, , [, , , { c: [, '1'] }]]] }, + // { encodeValuesOnly: true, arrayFormat: 'brackets' }, + // ), + // 'a[][][][c][]=1', + // ); + // st.equal( + // stringify( + // { a: [, [, , [, , , { c: [, '1'] }]]] }, + // { encodeValuesOnly: true, arrayFormat: 'repeat' }, + // ), + // 'a[c]=1', + // ); + expect( + 
stringify({ a: [, [, , [, , , { c: [, '1'] }]]] }, { encodeValuesOnly: true, arrayFormat: 'indices' }), + ).toBe('a[1][2][3][c][1]=1'); + expect( + stringify({ a: [, [, , [, , , { c: [, '1'] }]]] }, { encodeValuesOnly: true, arrayFormat: 'brackets' }), + ).toBe('a[][][][c][]=1'); + expect( + stringify({ a: [, [, , [, , , { c: [, '1'] }]]] }, { encodeValuesOnly: true, arrayFormat: 'repeat' }), + ).toBe('a[c]=1'); + }); + + test('encodes a very long string', function () { + var chars = []; + var expected = []; + for (var i = 0; i < 5e3; i++) { + chars.push(' ' + i); + + expected.push('%20' + i); + } + + var obj = { + foo: chars.join(''), + }; + + // st.equal( + // stringify(obj, { arrayFormat: 'bracket', charset: 'utf-8' }), + // 'foo=' + expected.join(''), + // ); + // @ts-expect-error + expect(stringify(obj, { arrayFormat: 'bracket', charset: 'utf-8' })).toBe('foo=' + expected.join('')); + }); +}); + +describe('stringifies empty keys', function () { + empty_test_cases.forEach(function (testCase) { + test('stringifies an object with empty string key with ' + testCase.input, function () { + // st.deepEqual( + // stringify(testCase.withEmptyKeys, { encode: false, arrayFormat: 'indices' }), + // testCase.stringifyOutput.indices, + // 'test case: ' + testCase.input + ', indices', + // ); + // st.deepEqual( + // stringify(testCase.withEmptyKeys, { encode: false, arrayFormat: 'brackets' }), + // testCase.stringifyOutput.brackets, + // 'test case: ' + testCase.input + ', brackets', + // ); + // st.deepEqual( + // stringify(testCase.withEmptyKeys, { encode: false, arrayFormat: 'repeat' }), + // testCase.stringifyOutput.repeat, + // 'test case: ' + testCase.input + ', repeat', + // ); + expect(stringify(testCase.with_empty_keys, { encode: false, arrayFormat: 'indices' })).toBe( + testCase.stringify_output.indices, + ); + expect(stringify(testCase.with_empty_keys, { encode: false, arrayFormat: 'brackets' })).toBe( + testCase.stringify_output.brackets, + ); + 
expect(stringify(testCase.with_empty_keys, { encode: false, arrayFormat: 'repeat' })).toBe( + testCase.stringify_output.repeat, + ); + }); + }); + + test('edge case with object/arrays', function () { + // st.deepEqual(stringify({ '': { '': [2, 3] } }, { encode: false }), '[][0]=2&[][1]=3'); + // st.deepEqual( + // stringify({ '': { '': [2, 3], a: 2 } }, { encode: false }), + // '[][0]=2&[][1]=3&[a]=2', + // ); + // st.deepEqual( + // stringify({ '': { '': [2, 3] } }, { encode: false, arrayFormat: 'indices' }), + // '[][0]=2&[][1]=3', + // ); + // st.deepEqual( + // stringify({ '': { '': [2, 3], a: 2 } }, { encode: false, arrayFormat: 'indices' }), + // '[][0]=2&[][1]=3&[a]=2', + // ); + expect(stringify({ '': { '': [2, 3] } }, { encode: false })).toBe('[][0]=2&[][1]=3'); + expect(stringify({ '': { '': [2, 3], a: 2 } }, { encode: false })).toBe('[][0]=2&[][1]=3&[a]=2'); + expect(stringify({ '': { '': [2, 3] } }, { encode: false, arrayFormat: 'indices' })).toBe( + '[][0]=2&[][1]=3', + ); + expect(stringify({ '': { '': [2, 3], a: 2 } }, { encode: false, arrayFormat: 'indices' })).toBe( + '[][0]=2&[][1]=3&[a]=2', + ); + }); +}); diff --git a/tests/qs/utils.test.ts b/tests/qs/utils.test.ts new file mode 100644 index 000000000..3df95e5bd --- /dev/null +++ b/tests/qs/utils.test.ts @@ -0,0 +1,169 @@ +import { combine, merge, is_buffer, assign_single_source } from 'openai/internal/qs/utils'; + +describe('merge()', function () { + // t.deepEqual(merge(null, true), [null, true], 'merges true into null'); + expect(merge(null, true)).toEqual([null, true]); + + // t.deepEqual(merge(null, [42]), [null, 42], 'merges null into an array'); + expect(merge(null, [42])).toEqual([null, 42]); + + // t.deepEqual( + // merge({ a: 'b' }, { a: 'c' }), + // { a: ['b', 'c'] }, + // 'merges two objects with the same key', + // ); + expect(merge({ a: 'b' }, { a: 'c' })).toEqual({ a: ['b', 'c'] }); + + var oneMerged = merge({ foo: 'bar' }, { foo: { first: '123' } }); + // t.deepEqual( + // 
oneMerged, + // { foo: ['bar', { first: '123' }] }, + // 'merges a standalone and an object into an array', + // ); + expect(oneMerged).toEqual({ foo: ['bar', { first: '123' }] }); + + var twoMerged = merge({ foo: ['bar', { first: '123' }] }, { foo: { second: '456' } }); + // t.deepEqual( + // twoMerged, + // { foo: { 0: 'bar', 1: { first: '123' }, second: '456' } }, + // 'merges a standalone and two objects into an array', + // ); + expect(twoMerged).toEqual({ foo: { 0: 'bar', 1: { first: '123' }, second: '456' } }); + + var sandwiched = merge({ foo: ['bar', { first: '123', second: '456' }] }, { foo: 'baz' }); + // t.deepEqual( + // sandwiched, + // { foo: ['bar', { first: '123', second: '456' }, 'baz'] }, + // 'merges an object sandwiched by two standalones into an array', + // ); + expect(sandwiched).toEqual({ foo: ['bar', { first: '123', second: '456' }, 'baz'] }); + + var nestedArrays = merge({ foo: ['baz'] }, { foo: ['bar', 'xyzzy'] }); + // t.deepEqual(nestedArrays, { foo: ['baz', 'bar', 'xyzzy'] }); + expect(nestedArrays).toEqual({ foo: ['baz', 'bar', 'xyzzy'] }); + + var noOptionsNonObjectSource = merge({ foo: 'baz' }, 'bar'); + // t.deepEqual(noOptionsNonObjectSource, { foo: 'baz', bar: true }); + expect(noOptionsNonObjectSource).toEqual({ foo: 'baz', bar: true }); + + (typeof Object.defineProperty !== 'function' ? 
test.skip : test)( + 'avoids invoking array setters unnecessarily', + function () { + var setCount = 0; + var getCount = 0; + var observed: any[] = []; + Object.defineProperty(observed, 0, { + get: function () { + getCount += 1; + return { bar: 'baz' }; + }, + set: function () { + setCount += 1; + }, + }); + merge(observed, [null]); + // st.equal(setCount, 0); + // st.equal(getCount, 1); + expect(setCount).toEqual(0); + expect(getCount).toEqual(1); + observed[0] = observed[0]; // eslint-disable-line no-self-assign + // st.equal(setCount, 1); + // st.equal(getCount, 2); + expect(setCount).toEqual(1); + expect(getCount).toEqual(2); + }, + ); +}); + +test('assign()', function () { + var target = { a: 1, b: 2 }; + var source = { b: 3, c: 4 }; + var result = assign_single_source(target, source); + + expect(result).toEqual(target); + expect(target).toEqual({ a: 1, b: 3, c: 4 }); + expect(source).toEqual({ b: 3, c: 4 }); +}); + +describe('combine()', function () { + test('both arrays', function () { + var a = [1]; + var b = [2]; + var combined = combine(a, b); + + // st.deepEqual(a, [1], 'a is not mutated'); + // st.deepEqual(b, [2], 'b is not mutated'); + // st.notEqual(a, combined, 'a !== combined'); + // st.notEqual(b, combined, 'b !== combined'); + // st.deepEqual(combined, [1, 2], 'combined is a + b'); + expect(a).toEqual([1]); + expect(b).toEqual([2]); + expect(combined).toEqual([1, 2]); + expect(a).not.toEqual(combined); + expect(b).not.toEqual(combined); + }); + + test('one array, one non-array', function () { + var aN = 1; + var a = [aN]; + var bN = 2; + var b = [bN]; + + var combinedAnB = combine(aN, b); + // st.deepEqual(b, [bN], 'b is not mutated'); + // st.notEqual(aN, combinedAnB, 'aN + b !== aN'); + // st.notEqual(a, combinedAnB, 'aN + b !== a'); + // st.notEqual(bN, combinedAnB, 'aN + b !== bN'); + // st.notEqual(b, combinedAnB, 'aN + b !== b'); + // st.deepEqual([1, 2], combinedAnB, 'first argument is array-wrapped when not an array'); + 
expect(b).toEqual([bN]); + expect(combinedAnB).not.toEqual(aN); + expect(combinedAnB).not.toEqual(a); + expect(combinedAnB).not.toEqual(bN); + expect(combinedAnB).not.toEqual(b); + expect(combinedAnB).toEqual([1, 2]); + + var combinedABn = combine(a, bN); + // st.deepEqual(a, [aN], 'a is not mutated'); + // st.notEqual(aN, combinedABn, 'a + bN !== aN'); + // st.notEqual(a, combinedABn, 'a + bN !== a'); + // st.notEqual(bN, combinedABn, 'a + bN !== bN'); + // st.notEqual(b, combinedABn, 'a + bN !== b'); + // st.deepEqual([1, 2], combinedABn, 'second argument is array-wrapped when not an array'); + expect(a).toEqual([aN]); + expect(combinedABn).not.toEqual(aN); + expect(combinedABn).not.toEqual(a); + expect(combinedABn).not.toEqual(bN); + expect(combinedABn).not.toEqual(b); + expect(combinedABn).toEqual([1, 2]); + }); + + test('neither is an array', function () { + var combined = combine(1, 2); + // st.notEqual(1, combined, '1 + 2 !== 1'); + // st.notEqual(2, combined, '1 + 2 !== 2'); + // st.deepEqual([1, 2], combined, 'both arguments are array-wrapped when not an array'); + expect(combined).not.toEqual(1); + expect(combined).not.toEqual(2); + expect(combined).toEqual([1, 2]); + }); +}); + +test('is_buffer()', function () { + for (const x of [null, undefined, true, false, '', 'abc', 42, 0, NaN, {}, [], function () {}, /a/g]) { + // t.equal(is_buffer(x), false, inspect(x) + ' is not a buffer'); + expect(is_buffer(x)).toEqual(false); + } + + var fakeBuffer = { constructor: Buffer }; + // t.equal(is_buffer(fakeBuffer), false, 'fake buffer is not a buffer'); + expect(is_buffer(fakeBuffer)).toEqual(false); + + var saferBuffer = Buffer.from('abc'); + // t.equal(is_buffer(saferBuffer), true, 'SaferBuffer instance is a buffer'); + expect(is_buffer(saferBuffer)).toEqual(true); + + var buffer = Buffer.from('abc'); + // t.equal(is_buffer(buffer), true, 'real Buffer instance is a buffer'); + expect(is_buffer(buffer)).toEqual(true); +}); diff --git a/yarn.lock b/yarn.lock index 
68486892b..5a01e39e3 100644 --- a/yarn.lock +++ b/yarn.lock @@ -881,11 +881,6 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.18.tgz#8dfb97f0da23c2293e554c5a50d61ef134d7697f" integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA== -"@types/qs@^6.9.15": - version "6.9.15" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.15.tgz#adde8a060ec9c305a82de1babc1056e73bd64dce" - integrity sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg== - "@types/semver@^7.5.0": version "7.5.3" resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.3.tgz#9a726e116beb26c24f1ccd6850201e1246122e04" @@ -1205,12 +1200,12 @@ brace-expansion@^2.0.1: dependencies: balanced-match "^1.0.0" -braces@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== +braces@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: - fill-range "^7.0.1" + fill-range "^7.1.1" browserslist@^4.22.2: version "4.22.2" @@ -1248,17 +1243,6 @@ bundle-name@^3.0.0: dependencies: run-applescript "^5.0.0" -call-bind@^1.0.7: - version "1.0.7" - resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" - integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== - dependencies: - es-define-property "^1.0.0" - es-errors "^1.3.0" - function-bind "^1.1.2" - get-intrinsic "^1.2.4" - set-function-length "^1.2.1" - callsites@^3.0.0: version "3.1.0" resolved 
"https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" @@ -1448,15 +1432,6 @@ default-browser@^4.0.0: execa "^7.1.1" titleize "^3.0.0" -define-data-property@^1.1.4: - version "1.1.4" - resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" - integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== - dependencies: - es-define-property "^1.0.0" - es-errors "^1.3.0" - gopd "^1.0.1" - define-lazy-prop@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz#dbb19adfb746d7fc6d734a06b72f4a00d021255f" @@ -1523,18 +1498,6 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" -es-define-property@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845" - integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ== - dependencies: - get-intrinsic "^1.2.4" - -es-errors@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" - integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== - escalade@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" @@ -1806,10 +1769,10 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== +fill-range@^7.1.1: + version "7.1.1" + resolved 
"https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" @@ -1889,17 +1852,6 @@ get-caller-file@^2.0.5: resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== -get-intrinsic@^1.1.3, get-intrinsic@^1.2.4: - version "1.2.4" - resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" - integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== - dependencies: - es-errors "^1.3.0" - function-bind "^1.1.2" - has-proto "^1.0.1" - has-symbols "^1.0.3" - hasown "^2.0.0" - get-package-type@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" @@ -1965,13 +1917,6 @@ globby@^11.1.0: merge2 "^1.4.1" slash "^3.0.0" -gopd@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" - integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA== - dependencies: - get-intrinsic "^1.1.3" - graceful-fs@^4.2.9: version "4.2.11" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" @@ -1992,23 +1937,6 @@ has-flag@^4.0.0: resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== -has-property-descriptors@^1.0.2: - version 
"1.0.2" - resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" - integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== - dependencies: - es-define-property "^1.0.0" - -has-proto@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" - integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== - -has-symbols@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" - integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== - hasown@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" @@ -2038,6 +1966,13 @@ humanize-ms@^1.2.1: dependencies: ms "^2.0.0" +iconv-lite@^0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" + integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" + ignore@^5.2.0, ignore@^5.2.4: version "5.2.4" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324" @@ -2717,11 +2652,11 @@ merge2@^1.3.0, merge2@^1.4.1: integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== micromatch@^4.0.4: - version "4.0.5" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" - integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== 
+ version "4.0.8" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== dependencies: - braces "^3.0.2" + braces "^3.0.3" picomatch "^2.3.1" mime-db@1.51.0: @@ -2826,11 +2761,6 @@ npm-run-path@^5.1.0: dependencies: path-key "^4.0.0" -object-inspect@^1.13.1: - version "1.13.2" - resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff" - integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g== - once@^1.3.0: version "1.4.0" resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" @@ -3049,13 +2979,6 @@ pure-rand@^6.1.0: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.1.0.tgz#d173cf23258231976ccbdb05247c9787957604f2" integrity sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA== -qs@^6.10.3: - version "6.13.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" - integrity sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg== - dependencies: - side-channel "^1.0.6" - queue-microtask@^1.2.2: version "1.2.3" resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" @@ -3142,6 +3065,11 @@ safe-buffer@~5.2.0: resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== +"safer-buffer@>= 2.1.2 < 3.0.0": + version "2.1.2" + resolved 
"https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + semver@^6.3.0, semver@^6.3.1: version "6.3.1" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" @@ -3154,18 +3082,6 @@ semver@^7.5.3, semver@^7.5.4: dependencies: lru-cache "^6.0.0" -set-function-length@^1.2.1: - version "1.2.2" - resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" - integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== - dependencies: - define-data-property "^1.1.4" - es-errors "^1.3.0" - function-bind "^1.1.2" - get-intrinsic "^1.2.4" - gopd "^1.0.1" - has-property-descriptors "^1.0.2" - shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" @@ -3178,16 +3094,6 @@ shebang-regex@^3.0.0: resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== -side-channel@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" - integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== - dependencies: - call-bind "^1.0.7" - es-errors "^1.3.0" - get-intrinsic "^1.2.4" - object-inspect "^1.13.1" - signal-exit@^3.0.3, signal-exit@^3.0.7: version "3.0.7" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" From 
b5dbd622fd6029cab6020282d17d81996d3171d0 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 17 Sep 2024 12:26:15 +0100 Subject: [PATCH 258/533] chore(tests): add query string tests to ecosystem tests --- ecosystem-tests/node-js/test.js | 37 ++++++++++++++++--- .../node-ts-cjs-auto/tests/test.ts | 9 +++++ .../node-ts-cjs-web/tests/test-jsdom.ts | 9 +++++ .../node-ts-cjs-web/tests/test-node.ts | 9 +++++ .../node-ts-cjs/tests/test-jsdom.ts | 9 +++++ .../node-ts-cjs/tests/test-node.ts | 9 +++++ .../node-ts-esm-auto/tests/test.ts | 9 +++++ ecosystem-tests/node-ts-esm-web/tests/test.ts | 9 +++++ .../node-ts-esm/tests/test-esnext.ts | 9 +++++ ecosystem-tests/node-ts-esm/tests/test.ts | 9 +++++ .../node-ts4.5-jest27/tests/test.ts | 9 +++++ .../ts-browser-webpack/src/index.ts | 9 +++++ .../vercel-edge/src/uploadWebApiTestCases.ts | 11 ++++++ 13 files changed, 142 insertions(+), 5 deletions(-) diff --git a/ecosystem-tests/node-js/test.js b/ecosystem-tests/node-js/test.js index 7f9f21736..e2a26f856 100644 --- a/ecosystem-tests/node-js/test.js +++ b/ecosystem-tests/node-js/test.js @@ -1,8 +1,35 @@ -const openaiKey = "a valid OpenAI key" const OpenAI = require('openai'); -console.log(OpenAI) +const openai = new OpenAI(); -const openai = new OpenAI({ - apiKey: openaiKey, -}); +function assertEqual(actual, expected) { + if (actual === expected) { + return; + } + + console.error('expected', expected); + console.error('actual ', actual); + throw new Error('expected values to be equal'); +} + +async function main() { + const completion = await openai.chat.completions.create({ + model: 'gpt-4', + messages: [{ role: 'user', content: 'Say this is a test' }], + }); + if (!completion.choices[0].message.content) { + console.dir(completion, { depth: 4 }); + throw new Error('no response content!'); + } + + assertEqual( + decodeURIComponent(openai.stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + 'foo[nested][a]=true&foo[nested][b]=foo', + ); + assertEqual( + 
decodeURIComponent(openai.stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + 'foo[nested][a][]=hello&foo[nested][a][]=world', + ); +} + +main(); diff --git a/ecosystem-tests/node-ts-cjs-auto/tests/test.ts b/ecosystem-tests/node-ts-cjs-auto/tests/test.ts index 84c99ee5a..203afba5a 100644 --- a/ecosystem-tests/node-ts-cjs-auto/tests/test.ts +++ b/ecosystem-tests/node-ts-cjs-auto/tests/test.ts @@ -257,3 +257,12 @@ describe('toFile', () => { expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts-cjs-web/tests/test-jsdom.ts b/ecosystem-tests/node-ts-cjs-web/tests/test-jsdom.ts index adcb44858..e7b6be07d 100644 --- a/ecosystem-tests/node-ts-cjs-web/tests/test-jsdom.ts +++ b/ecosystem-tests/node-ts-cjs-web/tests/test-jsdom.ts @@ -164,3 +164,12 @@ describe.skip('toFile', () => { expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts-cjs-web/tests/test-node.ts b/ecosystem-tests/node-ts-cjs-web/tests/test-node.ts index 1784f8d5e..668e65332 100644 --- a/ecosystem-tests/node-ts-cjs-web/tests/test-node.ts +++ b/ecosystem-tests/node-ts-cjs-web/tests/test-node.ts @@ -151,3 +151,12 @@ describe('toFile', () => { 
expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts-cjs/tests/test-jsdom.ts b/ecosystem-tests/node-ts-cjs/tests/test-jsdom.ts index 9908e45f8..15b9df7c9 100644 --- a/ecosystem-tests/node-ts-cjs/tests/test-jsdom.ts +++ b/ecosystem-tests/node-ts-cjs/tests/test-jsdom.ts @@ -144,3 +144,12 @@ describe.skip('toFile', () => { expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts-cjs/tests/test-node.ts b/ecosystem-tests/node-ts-cjs/tests/test-node.ts index 5ece57019..3f6e5d572 100644 --- a/ecosystem-tests/node-ts-cjs/tests/test-node.ts +++ b/ecosystem-tests/node-ts-cjs/tests/test-node.ts @@ -192,3 +192,12 @@ describe('toFile', () => { expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts-esm-auto/tests/test.ts 
b/ecosystem-tests/node-ts-esm-auto/tests/test.ts index d28bc2b37..88beb2f54 100644 --- a/ecosystem-tests/node-ts-esm-auto/tests/test.ts +++ b/ecosystem-tests/node-ts-esm-auto/tests/test.ts @@ -194,3 +194,12 @@ describe('toFile', () => { expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts-esm-web/tests/test.ts b/ecosystem-tests/node-ts-esm-web/tests/test.ts index e0055c89f..675fb9a73 100644 --- a/ecosystem-tests/node-ts-esm-web/tests/test.ts +++ b/ecosystem-tests/node-ts-esm-web/tests/test.ts @@ -152,3 +152,12 @@ describe('toFile', () => { expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts-esm/tests/test-esnext.ts b/ecosystem-tests/node-ts-esm/tests/test-esnext.ts index d3b77971e..05cdd1047 100644 --- a/ecosystem-tests/node-ts-esm/tests/test-esnext.ts +++ b/ecosystem-tests/node-ts-esm/tests/test-esnext.ts @@ -64,3 +64,12 @@ it(`raw response`, async function () { const json: ChatCompletion = JSON.parse(chunks.join('')); expect(json.choices[0]?.message.content || '').toBeSimilarTo('This is a test', 10); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), 
+ ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts-esm/tests/test.ts b/ecosystem-tests/node-ts-esm/tests/test.ts index 906220e95..7694a9874 100644 --- a/ecosystem-tests/node-ts-esm/tests/test.ts +++ b/ecosystem-tests/node-ts-esm/tests/test.ts @@ -173,3 +173,12 @@ describe('toFile', () => { expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/node-ts4.5-jest27/tests/test.ts b/ecosystem-tests/node-ts4.5-jest27/tests/test.ts index 5ece57019..3f6e5d572 100644 --- a/ecosystem-tests/node-ts4.5-jest27/tests/test.ts +++ b/ecosystem-tests/node-ts4.5-jest27/tests/test.ts @@ -192,3 +192,12 @@ describe('toFile', () => { expect(result.filename).toEqual('finetune.jsonl'); }); }); + +test('query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); diff --git a/ecosystem-tests/ts-browser-webpack/src/index.ts b/ecosystem-tests/ts-browser-webpack/src/index.ts index b7821f568..75fb6ea7a 100644 --- a/ecosystem-tests/ts-browser-webpack/src/index.ts +++ b/ecosystem-tests/ts-browser-webpack/src/index.ts @@ -209,4 +209,13 @@ describe('toFile', () => { }); }); 
+it('handles query strings', () => { + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + ).toEqual('foo[nested][a]=true&foo[nested][b]=foo'); + expect( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + ).toEqual('foo[nested][a][]=hello&foo[nested][a][]=world'); +}); + runTests(); diff --git a/ecosystem-tests/vercel-edge/src/uploadWebApiTestCases.ts b/ecosystem-tests/vercel-edge/src/uploadWebApiTestCases.ts index 3f2c6b468..eb8be0030 100644 --- a/ecosystem-tests/vercel-edge/src/uploadWebApiTestCases.ts +++ b/ecosystem-tests/vercel-edge/src/uploadWebApiTestCases.ts @@ -180,4 +180,15 @@ export function uploadWebApiTestCases({ expectEqual(result.filename, 'finetune.jsonl'); }); } + + it('handles query strings', async () => { + expectEqual( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + 'foo[nested][a]=true&foo[nested][b]=foo', + ); + expectEqual( + decodeURIComponent((client as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + 'foo[nested][a][]=hello&foo[nested][a][]=world', + ); + }); } From 40e82841e6aac7c2c103053bf017d043cac6b8dc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 13:03:36 +0000 Subject: [PATCH 259/533] chore(internal): fix some types (#1082) --- src/internal/qs/stringify.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/internal/qs/stringify.ts b/src/internal/qs/stringify.ts index d0c450341..67497561a 100644 --- a/src/internal/qs/stringify.ts +++ b/src/internal/qs/stringify.ts @@ -204,7 +204,7 @@ function inner_stringify( strictNullHandling, skipNulls, encodeDotInKeys, - // @ts-expect-error + // @ts-ignore generateArrayPrefix === 'comma' && encodeValuesOnly && is_array(obj) ? 
null : encoder, filter, sort, @@ -224,7 +224,7 @@ function inner_stringify( function normalize_stringify_options( opts: StringifyOptions = defaults, -): NonNullableProperties { +): NonNullableProperties> & { indices?: boolean } { if (typeof opts.allowEmptyArrays !== 'undefined' && typeof opts.allowEmptyArrays !== 'boolean') { throw new TypeError('`allowEmptyArrays` option can only be `true` or `false`, when provided'); } @@ -299,7 +299,7 @@ function normalize_stringify_options( formatter: formatter, serializeDate: typeof opts.serializeDate === 'function' ? opts.serializeDate : defaults.serializeDate, skipNulls: typeof opts.skipNulls === 'boolean' ? opts.skipNulls : defaults.skipNulls, - // @ts-expect-error + // @ts-ignore sort: typeof opts.sort === 'function' ? opts.sort : null, strictNullHandling: typeof opts.strictNullHandling === 'boolean' ? opts.strictNullHandling : defaults.strictNullHandling, From 0341ff618866200f0cc33a61037fe789338be799 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 3 Sep 2024 16:22:30 +0100 Subject: [PATCH 260/533] chore(internal): add ecosystem test for qs reproduction --- ecosystem-tests/cli.ts | 8 + ecosystem-tests/nodenext-tsup/index.ts | 41 + .../nodenext-tsup/package-lock.json | 2078 +++++++++++++++++ ecosystem-tests/nodenext-tsup/package.json | 16 + ecosystem-tests/nodenext-tsup/tsconfig.json | 18 + ecosystem-tests/nodenext-tsup/tsup.config.ts | 7 + 6 files changed, 2168 insertions(+) create mode 100644 ecosystem-tests/nodenext-tsup/index.ts create mode 100644 ecosystem-tests/nodenext-tsup/package-lock.json create mode 100644 ecosystem-tests/nodenext-tsup/package.json create mode 100644 ecosystem-tests/nodenext-tsup/tsconfig.json create mode 100644 ecosystem-tests/nodenext-tsup/tsup.config.ts diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index 2d9702112..550512634 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -34,6 +34,14 @@ const projectRunners = { await installPackage(); await 
run('node', ['test.js']); }, + 'nodenext-tsup': async () => { + await installPackage(); + await run('npm', ['run', 'build']); + + if (state.live) { + await run('npm', ['run', 'main']); + } + }, 'ts-browser-webpack': async () => { await installPackage(); diff --git a/ecosystem-tests/nodenext-tsup/index.ts b/ecosystem-tests/nodenext-tsup/index.ts new file mode 100644 index 000000000..f70568435 --- /dev/null +++ b/ecosystem-tests/nodenext-tsup/index.ts @@ -0,0 +1,41 @@ +import { OpenAI } from 'openai'; + +const openai = new OpenAI(); + +function assertEqual(actual: any, expected: any) { + if (actual === expected) { + return; + } + + console.error('expected', expected); + console.error('actual ', actual); + throw new Error('expected values to be equal'); +} + +async function main() { + const completion = await openai.chat.completions.create({ + model: 'gpt-4o-mini', + messages: [ + { + role: 'user', + content: 'What is the capital of the United States?', + }, + ], + }); + // smoke test for responses + if (!completion.choices[0]?.message.content) { + console.dir(completion, { depth: 4 }); + throw new Error('no response content!'); + } + + assertEqual( + decodeURIComponent((openai as any).stringifyQuery({ foo: { nested: { a: true, b: 'foo' } } })), + 'foo[nested][a]=true&foo[nested][b]=foo', + ); + assertEqual( + decodeURIComponent((openai as any).stringifyQuery({ foo: { nested: { a: ['hello', 'world'] } } })), + 'foo[nested][a][]=hello&foo[nested][a][]=world', + ); +} + +main(); diff --git a/ecosystem-tests/nodenext-tsup/package-lock.json b/ecosystem-tests/nodenext-tsup/package-lock.json new file mode 100644 index 000000000..8f4729374 --- /dev/null +++ b/ecosystem-tests/nodenext-tsup/package-lock.json @@ -0,0 +1,2078 @@ +{ + "name": "nodenext-tsup", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "nodenext-tsup", + "devDependencies": { + "tsup": "^8.2.4" + }, + "peerDependencies": { + "typescript": "^5.5.4" + } + }, + 
"node_modules/@esbuild/aix-ppc64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.23.1.tgz", + "integrity": "sha512-6VhYk1diRqrhBAqpJEdjASR/+WVRtfjpqKuNw11cLiaWpAT/Uu+nokB+UJnevzy/P9C/ty6AOe0dwueMrGh/iQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.23.1.tgz", + "integrity": "sha512-uz6/tEy2IFm9RYOyvKl88zdzZfwEfKZmnX9Cj1BHjeSGNuGLuMD1kR8y5bteYmwqKm1tj8m4cb/aKEorr6fHWQ==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.23.1.tgz", + "integrity": "sha512-xw50ipykXcLstLeWH7WRdQuysJqejuAGPd30vd1i5zSyKK3WE+ijzHmLKxdiCMtH1pHz78rOg0BKSYOSB/2Khw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.23.1.tgz", + "integrity": "sha512-nlN9B69St9BwUoB+jkyU090bru8L0NA3yFvAd7k8dNsVH8bi9a8cUAUSEcEEgTp2z3dbEDGJGfP6VUnkQnlReg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.23.1.tgz", + "integrity": "sha512-YsS2e3Wtgnw7Wq53XXBLcV6JhRsEq8hkfg91ESVadIrzr9wO6jJDMZnCQbHm1Guc5t/CdDiFSSfWP58FNuvT3Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + 
"node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.23.1.tgz", + "integrity": "sha512-aClqdgTDVPSEGgoCS8QDG37Gu8yc9lTHNAQlsztQ6ENetKEO//b8y31MMu2ZaPbn4kVsIABzVLXYLhCGekGDqw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.23.1.tgz", + "integrity": "sha512-h1k6yS8/pN/NHlMl5+v4XPfikhJulk4G+tKGFIOwURBSFzE8bixw1ebjluLOjfwtLqY0kewfjLSrO6tN2MgIhA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.23.1.tgz", + "integrity": "sha512-lK1eJeyk1ZX8UklqFd/3A60UuZ/6UVfGT2LuGo3Wp4/z7eRTRYY+0xOu2kpClP+vMTi9wKOfXi2vjUpO1Ro76g==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.23.1.tgz", + "integrity": "sha512-CXXkzgn+dXAPs3WBwE+Kvnrf4WECwBdfjfeYHpMeVxWE0EceB6vhWGShs6wi0IYEqMSIzdOF1XjQ/Mkm5d7ZdQ==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.23.1.tgz", + "integrity": "sha512-/93bf2yxencYDnItMYV/v116zff6UyTjo4EtEQjUBeGiVpMmffDNUyD9UN2zV+V3LRV3/on4xdZ26NKzn6754g==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], 
+ "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.23.1.tgz", + "integrity": "sha512-VTN4EuOHwXEkXzX5nTvVY4s7E/Krz7COC8xkftbbKRYAl96vPiUssGkeMELQMOnLOJ8k3BY1+ZY52tttZnHcXQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.23.1.tgz", + "integrity": "sha512-Vx09LzEoBa5zDnieH8LSMRToj7ir/Jeq0Gu6qJ/1GcBq9GkfoEAoXvLiW1U9J1qE/Y/Oyaq33w5p2ZWrNNHNEw==", + "cpu": [ + "loong64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.23.1.tgz", + "integrity": "sha512-nrFzzMQ7W4WRLNUOU5dlWAqa6yVeI0P78WKGUo7lg2HShq/yx+UYkeNSE0SSfSure0SqgnsxPvmAUu/vu0E+3Q==", + "cpu": [ + "mips64el" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.23.1.tgz", + "integrity": "sha512-dKN8fgVqd0vUIjxuJI6P/9SSSe/mB9rvA98CSH2sJnlZ/OCZWO1DJvxj8jvKTfYUdGfcq2dDxoKaC6bHuTlgcw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.23.1.tgz", + "integrity": "sha512-5AV4Pzp80fhHL83JM6LoA6pTQVWgB1HovMBsLQ9OZWLDqVY8MVobBXNSmAJi//Csh6tcY7e7Lny2Hg1tElMjIA==", + "cpu": [ + "riscv64" + ], + "dev": true, 
+ "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.23.1.tgz", + "integrity": "sha512-9ygs73tuFCe6f6m/Tb+9LtYxWR4c9yg7zjt2cYkjDbDpV/xVn+68cQxMXCjUpYwEkze2RcU/rMnfIXNRFmSoDw==", + "cpu": [ + "s390x" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.23.1.tgz", + "integrity": "sha512-EV6+ovTsEXCPAp58g2dD68LxoP/wK5pRvgy0J/HxPGB009omFPv3Yet0HiaqvrIrgPTBuC6wCH1LTOY91EO5hQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.23.1.tgz", + "integrity": "sha512-aevEkCNu7KlPRpYLjwmdcuNz6bDFiE7Z8XC4CPqExjTvrHugh28QzUXVOZtiYghciKUacNktqxdpymplil1beA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.23.1.tgz", + "integrity": "sha512-3x37szhLexNA4bXhLrCC/LImN/YtWis6WXr1VESlfVtVeoFJBRINPJ3f0a/6LV8zpikqoUg4hyXw0sFBt5Cr+Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.23.1.tgz", + "integrity": "sha512-aY2gMmKmPhxfU+0EdnN+XNtGbjfQgwZj43k8G3fyrDM/UdZww6xrWxmDkuz2eCZchqVeABjV5BpildOrUbBTqA==", + "cpu": [ + "x64" + ], 
+ "dev": true, + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.23.1.tgz", + "integrity": "sha512-RBRT2gqEl0IKQABT4XTj78tpk9v7ehp+mazn2HbUeZl1YMdaGAQqhapjGTCe7uw7y0frDi4gS0uHzhvpFuI1sA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.23.1.tgz", + "integrity": "sha512-4O+gPR5rEBe2FpKOVyiJ7wNDPA8nGzDuJ6gN4okSA1gEOYZ67N8JPk58tkWtdtPeLz7lBnY6I5L3jdsr3S+A6A==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.23.1.tgz", + "integrity": "sha512-BcaL0Vn6QwCwre3Y717nVHZbAa4UBEigzFm6VdsVdT/MbZ38xoj1X9HPkZhbmaBGUD1W8vxAfffbDe8bA6AKnQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.23.1.tgz", + "integrity": "sha512-BHpFFeslkWrXWyUPnbKm+xYYVYruCinGcftSBaa8zoF9hZO4BcSCFUvHVTtzpIY6YzUnYtuEhZ+C9iEXjxnasg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "dependencies": { + 
"string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": 
"sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.21.2.tgz", + "integrity": 
"sha512-fSuPrt0ZO8uXeS+xP3b+yYTCBUd05MoSp2N/MFOgjhhUhMmchXlpTQrTpI8T+YAwAQuK7MafsCOxW7VrPMrJcg==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.21.2.tgz", + "integrity": "sha512-xGU5ZQmPlsjQS6tzTTGwMsnKUtu0WVbl0hYpTPauvbRAnmIvpInhJtgjj3mcuJpEiuUw4v1s4BimkdfDWlh7gA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.21.2.tgz", + "integrity": "sha512-99AhQ3/ZMxU7jw34Sq8brzXqWH/bMnf7ZVhvLk9QU2cOepbQSVTns6qoErJmSiAvU3InRqC2RRZ5ovh1KN0d0Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.21.2.tgz", + "integrity": "sha512-ZbRaUvw2iN/y37x6dY50D8m2BnDbBjlnMPotDi/qITMJ4sIxNY33HArjikDyakhSv0+ybdUxhWxE6kTI4oX26w==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.21.2.tgz", + "integrity": "sha512-ztRJJMiE8nnU1YFcdbd9BcH6bGWG1z+jP+IPW2oDUAPxPjo9dverIOyXz76m6IPA6udEL12reYeLojzW2cYL7w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.21.2.tgz", + "integrity": 
"sha512-flOcGHDZajGKYpLV0JNc0VFH361M7rnV1ee+NTeC/BQQ1/0pllYcFmxpagltANYt8FYf9+kL6RSk80Ziwyhr7w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.21.2.tgz", + "integrity": "sha512-69CF19Kp3TdMopyteO/LJbWufOzqqXzkrv4L2sP8kfMaAQ6iwky7NoXTp7bD6/irKgknDKM0P9E/1l5XxVQAhw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.21.2.tgz", + "integrity": "sha512-48pD/fJkTiHAZTnZwR0VzHrao70/4MlzJrq0ZsILjLW/Ab/1XlVUStYyGt7tdyIiVSlGZbnliqmult/QGA2O2w==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.21.2.tgz", + "integrity": "sha512-cZdyuInj0ofc7mAQpKcPR2a2iu4YM4FQfuUzCVA2u4HI95lCwzjoPtdWjdpDKyHxI0UO82bLDoOaLfpZ/wviyQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.21.2.tgz", + "integrity": "sha512-RL56JMT6NwQ0lXIQmMIWr1SW28z4E4pOhRRNqwWZeXpRlykRIlEpSWdsgNWJbYBEWD84eocjSGDu/XxbYeCmwg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.21.2", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.21.2.tgz", + "integrity": "sha512-PMxkrWS9z38bCr3rWvDFVGD6sFeZJw4iQlhrup7ReGmfn7Oukrr/zweLhYX6v2/8J6Cep9IEA/SmjXjCmSbrMQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.21.2.tgz", + "integrity": "sha512-B90tYAUoLhU22olrafY3JQCFLnT3NglazdwkHyxNDYF/zAxJt5fJUB/yBoWFoIQ7SQj+KLe3iL4BhOMa9fzgpw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.21.2.tgz", + "integrity": "sha512-7twFizNXudESmC9oneLGIUmoHiiLppz/Xs5uJQ4ShvE6234K0VB1/aJYU3f/4g7PhssLGKBVCC37uRkkOi8wjg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.21.2.tgz", + "integrity": "sha512-9rRero0E7qTeYf6+rFh3AErTNU1VCQg2mn7CQcI44vNUWM9Ze7MSRS/9RFuSsox+vstRt97+x3sOhEey024FRQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.21.2.tgz", + "integrity": "sha512-5rA4vjlqgrpbFVVHX3qkrCo/fZTj1q0Xxpg+Z7yIo3J2AilW7t2+n6Q8Jrx+4MrYpAnjttTYF8rr7bP46BPzRw==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.21.2", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.21.2.tgz", + "integrity": "sha512-6UUxd0+SKomjdzuAcp+HAmxw1FlGBnl1v2yEPSabtx4lBfdXHDVsW7+lQkgz9cNFJGY3AWR7+V8P5BqkD9L9nA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "dev": true + }, + "node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + 
"engines": { + "node": ">= 8" + } + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/bundle-require": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/bundle-require/-/bundle-require-5.0.0.tgz", + "integrity": 
"sha512-GuziW3fSSmopcx4KRymQEJVbZUfqlCqcq7dvs6TYwKRZiegK/2buMxQTPs6MGlNv50wms1699qYO54R8XfRX4w==", + "dev": true, + "dependencies": { + "load-tsconfig": "^0.2.3" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "peerDependencies": { + "esbuild": ">=0.18" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", 
+ "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/consola": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.2.3.tgz", + "integrity": "sha512-I5qxpzLv+sJhTVEoLYNcTW+bThDCPsit0vLNKShZx6rLtpilNpmmeTPaeqJb9ZE9dV3DGaeby6Vuhrw38WjeyQ==", + "dev": true, + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.6.tgz", + "integrity": "sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/debug/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + 
"resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true + }, + "node_modules/esbuild": { + "version": "0.23.1", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.23.1.tgz", + "integrity": "sha512-VVNz/9Sa0bs5SELtn3f7qhJCDPCF5oMEl5cO9/SSinpE9hbPVvxbd572HH5AKiP7WD8INO53GgfDDhRjkylHEg==", + "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.23.1", + "@esbuild/android-arm": "0.23.1", + "@esbuild/android-arm64": "0.23.1", + "@esbuild/android-x64": "0.23.1", + "@esbuild/darwin-arm64": "0.23.1", + "@esbuild/darwin-x64": "0.23.1", + "@esbuild/freebsd-arm64": "0.23.1", + "@esbuild/freebsd-x64": "0.23.1", + "@esbuild/linux-arm": "0.23.1", + "@esbuild/linux-arm64": "0.23.1", + "@esbuild/linux-ia32": "0.23.1", + "@esbuild/linux-loong64": "0.23.1", + "@esbuild/linux-mips64el": "0.23.1", + "@esbuild/linux-ppc64": "0.23.1", + "@esbuild/linux-riscv64": "0.23.1", + "@esbuild/linux-s390x": "0.23.1", + "@esbuild/linux-x64": "0.23.1", + "@esbuild/netbsd-x64": "0.23.1", + "@esbuild/openbsd-arm64": "0.23.1", + "@esbuild/openbsd-x64": "0.23.1", + "@esbuild/sunos-x64": "0.23.1", + "@esbuild/win32-arm64": "0.23.1", + "@esbuild/win32-ia32": "0.23.1", + "@esbuild/win32-x64": "0.23.1" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": 
"sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/foreground-child": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", + "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.0", + 
"signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": 
"sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": 
"sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/joycon": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/joycon/-/joycon-3.1.1.tgz", + "integrity": "sha512-34wB/Y7MW7bzjKRjUKTa46I2Z7eV62Rkhva+KkopW7Qvv/OSWBqvkSY7vusOPrNuZcUG3tApvdVgNB8POj3SPw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/lilconfig": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/load-tsconfig": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/load-tsconfig/-/load-tsconfig-0.2.5.tgz", + "integrity": "sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==", + "dev": true, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/lodash.sortby": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz", + "integrity": "sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==", + "dev": true + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": 
"https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": 
"https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz", + "integrity": "sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==", + "dev": true + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": 
"sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + 
"funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.21.2.tgz", + "integrity": "sha512-e3TapAgYf9xjdLvKQCkQTnbTKd4a6jwlpQSJJFokHGaX2IVjoEqkIIhiQfqsi0cdwlOD+tQGuOd5AJkc5RngBw==", + "dev": true, + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.21.2", + "@rollup/rollup-android-arm64": "4.21.2", + "@rollup/rollup-darwin-arm64": "4.21.2", + "@rollup/rollup-darwin-x64": "4.21.2", + "@rollup/rollup-linux-arm-gnueabihf": "4.21.2", + "@rollup/rollup-linux-arm-musleabihf": "4.21.2", + 
"@rollup/rollup-linux-arm64-gnu": "4.21.2", + "@rollup/rollup-linux-arm64-musl": "4.21.2", + "@rollup/rollup-linux-powerpc64le-gnu": "4.21.2", + "@rollup/rollup-linux-riscv64-gnu": "4.21.2", + "@rollup/rollup-linux-s390x-gnu": "4.21.2", + "@rollup/rollup-linux-x64-gnu": "4.21.2", + "@rollup/rollup-linux-x64-musl": "4.21.2", + "@rollup/rollup-win32-arm64-msvc": "4.21.2", + "@rollup/rollup-win32-ia32-msvc": "4.21.2", + "@rollup/rollup-win32-x64-msvc": "4.21.2", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": 
"sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.8.0-beta.0", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.8.0-beta.0.tgz", + "integrity": "sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==", + "dev": true, + "dependencies": { + "whatwg-url": "^7.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map/node_modules/tr46": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz", + "integrity": "sha512-dTpowEjclQ7Kgx5SdBkqRzVhERQXov8/l9Ft9dVM9fmg0W0KQSVaXX9T4i6twCPNtYiZM53lpSSUAwJbFPOHxA==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/source-map/node_modules/webidl-conversions": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz", + "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==", + "dev": true + }, + "node_modules/source-map/node_modules/whatwg-url": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz", + "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==", + "dev": true, + "dependencies": { + "lodash.sortby": "^4.7.0", + "tr46": "^1.0.1", + "webidl-conversions": "^4.0.2" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": 
"sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": 
"sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/sucrase": { + "version": "3.35.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", + "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/thenify": { + 
"version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tree-kill": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/tree-kill/-/tree-kill-1.2.2.tgz", + "integrity": "sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==", + "dev": true, + "bin": { + "tree-kill": "cli.js" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true + }, + "node_modules/tsup": { + "version": "8.2.4", + "resolved": "https://registry.npmjs.org/tsup/-/tsup-8.2.4.tgz", + "integrity": "sha512-akpCPePnBnC/CXgRrcy72ZSntgIEUa1jN0oJbbvpALWKNOz1B7aM+UVDWGRGIO/T/PZugAESWDJUAb5FD48o8Q==", + "dev": true, + "dependencies": { + "bundle-require": "^5.0.0", + "cac": "^6.7.14", + "chokidar": "^3.6.0", + "consola": 
"^3.2.3", + "debug": "^4.3.5", + "esbuild": "^0.23.0", + "execa": "^5.1.1", + "globby": "^11.1.0", + "joycon": "^3.1.1", + "picocolors": "^1.0.1", + "postcss-load-config": "^6.0.1", + "resolve-from": "^5.0.0", + "rollup": "^4.19.0", + "source-map": "0.8.0-beta.0", + "sucrase": "^3.35.0", + "tree-kill": "^1.2.2" + }, + "bin": { + "tsup": "dist/cli-default.js", + "tsup-node": "dist/cli-node.js" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@microsoft/api-extractor": "^7.36.0", + "@swc/core": "^1", + "postcss": "^8.4.12", + "typescript": ">=4.5.0" + }, + "peerDependenciesMeta": { + "@microsoft/api-extractor": { + "optional": true + }, + "@swc/core": { + "optional": true + }, + "postcss": { + "optional": true + }, + "typescript": { + "optional": true + } + } + }, + "node_modules/typescript": { + "version": "5.5.4", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.4.tgz", + "integrity": "sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + 
"dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + } + } +} diff --git a/ecosystem-tests/nodenext-tsup/package.json b/ecosystem-tests/nodenext-tsup/package.json new file mode 100644 index 000000000..ddef80219 --- /dev/null +++ b/ecosystem-tests/nodenext-tsup/package.json @@ -0,0 +1,16 @@ +{ + "name": "nodenext-tsup", + "module": "index.ts", + "type": "module", + "scripts": { + "build": "tsup", + "main": "npm run build && node dist/index.cjs" + }, + "dependencies": {}, + "devDependencies": { + "tsup": "^8.2.4" + }, + "peerDependencies": { + "typescript": "^5.5.4" + } +} diff --git a/ecosystem-tests/nodenext-tsup/tsconfig.json b/ecosystem-tests/nodenext-tsup/tsconfig.json new file mode 100644 index 000000000..49111f4a1 --- /dev/null +++ b/ecosystem-tests/nodenext-tsup/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "declaration": true, + "declarationMap": true, + "esModuleInterop": true, + "incremental": false, + "isolatedModules": true, + "lib": ["es2022", "DOM", "DOM.Iterable"], + "module": "NodeNext", + "moduleDetection": "force", + "moduleResolution": "NodeNext", + "noUncheckedIndexedAccess": true, + "resolveJsonModule": true, + "skipLibCheck": true, + "strict": true, + "target": "ES2022" + } +} diff --git a/ecosystem-tests/nodenext-tsup/tsup.config.ts b/ecosystem-tests/nodenext-tsup/tsup.config.ts new file mode 100644 index 000000000..657cd79fa --- /dev/null +++ b/ecosystem-tests/nodenext-tsup/tsup.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from 
"tsup"; + +export default defineConfig({ + entry: ["index.ts"], + noExternal: ["openai"], + platform: "neutral", +}); From 7110f07cb79f27f69341b01b75f8a1f102ceab21 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 16:41:48 +0000 Subject: [PATCH 261/533] release: 4.62.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 16 ++++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 21 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8f32b4daf..281a52027 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.61.1" + ".": "4.62.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1fc81d615..87fff3e15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog +## 4.62.0 (2024-09-17) + +Full Changelog: [v4.61.1...v4.62.0](https://github.com/openai/openai-node/compare/v4.61.1...v4.62.0) + +### Features + +* **client:** add ._request_id property to object responses ([#1078](https://github.com/openai/openai-node/issues/1078)) ([d5c2131](https://github.com/openai/openai-node/commit/d5c21314449091dd1c668c7358b25e041466f588)) + + +### Chores + +* **internal:** add ecosystem test for qs reproduction ([0199dd8](https://github.com/openai/openai-node/commit/0199dd85981497fac2b60f786acc00ea30683897)) +* **internal:** add query string encoder ([#1079](https://github.com/openai/openai-node/issues/1079)) ([f870682](https://github.com/openai/openai-node/commit/f870682d5c490182547c428b0b5c75da0e34d15a)) +* **internal:** fix some types ([#1082](https://github.com/openai/openai-node/issues/1082)) ([1ec41a7](https://github.com/openai/openai-node/commit/1ec41a7d768502a31abb33bf86b0539e5b4b6541)) +* **tests:** add query string tests to ecosystem tests 
([36be724](https://github.com/openai/openai-node/commit/36be724384401bb697d8b07b0a1965be721cfa51)) + ## 4.61.1 (2024-09-16) Full Changelog: [v4.61.0...v4.61.1](https://github.com/openai/openai-node/compare/v4.61.0...v4.61.1) diff --git a/README.md b/README.md index b3de7fa55..88774c05b 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.61.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.62.0/mod.ts'; ``` diff --git a/package.json b/package.json index a65c9d6ba..69dab3ea5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.61.1", + "version": "4.62.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 641b61c02..e773b7fc5 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.61.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.62.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 6c1e0cb8e..70b394b16 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.61.1'; // x-release-please-version +export const VERSION = '4.62.0'; // x-release-please-version From 30a90ccca338c468aefcf98af703a9792ce424f2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 13:43:47 +0000 Subject: [PATCH 262/533] fix(types): remove leftover polyfill usage (#1084) --- src/_shims/node-types.d.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/_shims/node-types.d.ts b/src/_shims/node-types.d.ts index b31698f78..c159e5fa7 100644 --- 
a/src/_shims/node-types.d.ts +++ b/src/_shims/node-types.d.ts @@ -7,7 +7,7 @@ import * as fd from 'formdata-node'; export { type Agent } from 'node:http'; export { type Readable } from 'node:stream'; export { type ReadStream as FsReadStream } from 'node:fs'; -export { ReadableStream } from 'web-streams-polyfill'; +export { ReadableStream } from 'node:stream/web'; export const fetch: typeof nf.default; From 17ef084839dc3a152fb489b5c1fcad4280b98edc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 13:44:15 +0000 Subject: [PATCH 263/533] release: 4.62.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 281a52027..7a0305307 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.62.0" + ".": "4.62.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 87fff3e15..cdf113343 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.62.1 (2024-09-18) + +Full Changelog: [v4.62.0...v4.62.1](https://github.com/openai/openai-node/compare/v4.62.0...v4.62.1) + +### Bug Fixes + +* **types:** remove leftover polyfill usage ([#1084](https://github.com/openai/openai-node/issues/1084)) ([b7c9538](https://github.com/openai/openai-node/commit/b7c9538981a11005fb0a00774683d979d3ca663a)) + ## 4.62.0 (2024-09-17) Full Changelog: [v4.61.1...v4.62.0](https://github.com/openai/openai-node/compare/v4.61.1...v4.62.0) diff --git a/README.md b/README.md index 88774c05b..55e6c291d 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.62.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.62.1/mod.ts'; ``` 
diff --git a/package.json b/package.json index 69dab3ea5..146f1e6f9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.62.0", + "version": "4.62.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index e773b7fc5..ef51b1e45 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.62.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.62.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 70b394b16..1cfad9ed4 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.62.0'; // x-release-please-version +export const VERSION = '4.62.1'; // x-release-please-version From e5b3d50d9cdf57c023dbead988617d8abcdb99a5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 16:11:32 +0000 Subject: [PATCH 264/533] feat(client): send retry count header (#1087) --- src/core.ts | 16 ++++++++++++---- tests/index.test.ts | 25 +++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/src/core.ts b/src/core.ts index 90714d3ce..877ae8de1 100644 --- a/src/core.ts +++ b/src/core.ts @@ -308,7 +308,10 @@ export abstract class APIClient { return null; } - buildRequest(options: FinalRequestOptions): { req: RequestInit; url: string; timeout: number } { + buildRequest( + options: FinalRequestOptions, + { retryCount = 0 }: { retryCount?: number } = {}, + ): { req: RequestInit; url: string; timeout: number } { const { method, path, query, headers: headers = {} } = options; const body = @@ -340,7 +343,7 @@ export abstract class APIClient { 
headers[this.idempotencyHeader] = options.idempotencyKey; } - const reqHeaders = this.buildHeaders({ options, headers, contentLength }); + const reqHeaders = this.buildHeaders({ options, headers, contentLength, retryCount }); const req: RequestInit = { method, @@ -359,10 +362,12 @@ export abstract class APIClient { options, headers, contentLength, + retryCount, }: { options: FinalRequestOptions; headers: Record; contentLength: string | null | undefined; + retryCount: number; }): Record { const reqHeaders: Record = {}; if (contentLength) { @@ -378,6 +383,8 @@ export abstract class APIClient { delete reqHeaders['content-type']; } + reqHeaders['x-stainless-retry-count'] = String(retryCount); + this.validateHeaders(reqHeaders, headers); return reqHeaders; @@ -429,13 +436,14 @@ export abstract class APIClient { retriesRemaining: number | null, ): Promise { const options = await optionsInput; + const maxRetries = options.maxRetries ?? this.maxRetries; if (retriesRemaining == null) { - retriesRemaining = options.maxRetries ?? 
this.maxRetries; + retriesRemaining = maxRetries; } await this.prepareOptions(options); - const { req, url, timeout } = this.buildRequest(options); + const { req, url, timeout } = this.buildRequest(options, { retryCount: maxRetries - retriesRemaining }); await this.prepareRequest(req, { url, options }); diff --git a/tests/index.test.ts b/tests/index.test.ts index cd5f2a0a9..a6fa97199 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -241,6 +241,31 @@ describe('retries', () => { expect(count).toEqual(3); }); + test('retry count header', async () => { + let count = 0; + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + count++; + if (count <= 2) { + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After': '0.1', + }, + }); + } + capturedRequest = init; + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + + const client = new OpenAI({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 }); + + expect(await client.request({ path: '/foo', method: 'get' })).toEqual({ a: 1 }); + + expect((capturedRequest!.headers as Headers)['x-stainless-retry-count']).toEqual('2'); + expect(count).toEqual(3); + }); + test('retry on 429 with retry-after', async () => { let count = 0; const testFetch = async (url: RequestInfo, { signal }: RequestInit = {}): Promise => { From 8b9624400d799ce690e2a394a7ef3b76fbca1dfa Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 16:21:05 +0000 Subject: [PATCH 265/533] chore(types): improve type name for embedding models (#1089) --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 1 + src/resources/embeddings.ts | 5 ++++- src/resources/index.ts | 8 +++++++- 5 files changed, 14 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2fc39385e..0151c5a10 100644 --- a/.stats.yml +++ b/.stats.yml 
@@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff407aa10917e62f2b0c12d1ad2c4f1258ed083bd45753c70eaaf5b1cf8356ae.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-de1981b64ac229493473670d618500c6362c195f1057eb7de00bd1bc9184fbd5.yml diff --git a/api.md b/api.md index 7fb8f86a6..f38ab69be 100644 --- a/api.md +++ b/api.md @@ -64,6 +64,7 @@ Types: - CreateEmbeddingResponse - Embedding +- EmbeddingModel Methods: diff --git a/src/index.ts b/src/index.ts index b52406f6c..7fed1dc8c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -276,6 +276,7 @@ export namespace OpenAI { export import Embeddings = API.Embeddings; export import CreateEmbeddingResponse = API.CreateEmbeddingResponse; export import Embedding = API.Embedding; + export import EmbeddingModel = API.EmbeddingModel; export import EmbeddingCreateParams = API.EmbeddingCreateParams; export import Files = API.Files; diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index f72b9308a..6d8e670a7 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -77,6 +77,8 @@ export interface Embedding { object: 'embedding'; } +export type EmbeddingModel = 'text-embedding-ada-002' | 'text-embedding-3-small' | 'text-embedding-3-large'; + export interface EmbeddingCreateParams { /** * Input text to embed, encoded as a string or array of tokens. To embed multiple @@ -96,7 +98,7 @@ export interface EmbeddingCreateParams { * [Model overview](https://platform.openai.com/docs/models/overview) for * descriptions of them. */ - model: (string & {}) | 'text-embedding-ada-002' | 'text-embedding-3-small' | 'text-embedding-3-large'; + model: (string & {}) | EmbeddingModel; /** * The number of dimensions the resulting output embeddings should have. 
Only @@ -121,5 +123,6 @@ export interface EmbeddingCreateParams { export namespace Embeddings { export import CreateEmbeddingResponse = EmbeddingsAPI.CreateEmbeddingResponse; export import Embedding = EmbeddingsAPI.Embedding; + export import EmbeddingModel = EmbeddingsAPI.EmbeddingModel; export import EmbeddingCreateParams = EmbeddingsAPI.EmbeddingCreateParams; } diff --git a/src/resources/index.ts b/src/resources/index.ts index a78808584..68bd88a31 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -22,7 +22,13 @@ export { CompletionCreateParamsStreaming, Completions, } from './completions'; -export { CreateEmbeddingResponse, Embedding, EmbeddingCreateParams, Embeddings } from './embeddings'; +export { + CreateEmbeddingResponse, + Embedding, + EmbeddingModel, + EmbeddingCreateParams, + Embeddings, +} from './embeddings'; export { FileContent, FileDeleted, From ee1e4504d74f456800b671d163ad506f73846cca Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Sep 2024 05:06:35 +0000 Subject: [PATCH 266/533] release: 4.63.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7a0305307..541459357 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.62.1" + ".": "4.63.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index cdf113343..a9677f6d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.63.0 (2024-09-20) + +Full Changelog: [v4.62.1...v4.63.0](https://github.com/openai/openai-node/compare/v4.62.1...v4.63.0) + +### Features + +* **client:** send retry count header ([#1087](https://github.com/openai/openai-node/issues/1087)) 
([7bcebc0](https://github.com/openai/openai-node/commit/7bcebc0e3965c2decd1dffb1e67f5197260ca89e)) + + +### Chores + +* **types:** improve type name for embedding models ([#1089](https://github.com/openai/openai-node/issues/1089)) ([d6966d9](https://github.com/openai/openai-node/commit/d6966d9872a14b7fbee85a7bb1fae697852b8ce0)) + ## 4.62.1 (2024-09-18) Full Changelog: [v4.62.0...v4.62.1](https://github.com/openai/openai-node/compare/v4.62.0...v4.62.1) diff --git a/README.md b/README.md index 55e6c291d..b2a3bc4b4 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.62.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.63.0/mod.ts'; ``` diff --git a/package.json b/package.json index 146f1e6f9..831169e2d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.62.1", + "version": "4.63.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index ef51b1e45..f006e3f3f 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.62.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.63.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 1cfad9ed4..ee209cb0e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.62.1'; // x-release-please-version +export const VERSION = '4.63.0'; // x-release-please-version From 5ecdeae16716106ae5c3fa7c8235633c98ad3dd9 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 23 Sep 2024 18:49:47 +0100 Subject: [PATCH 267/533] chore(internal): fix slow ecosystem test (#1093) --- 
ecosystem-tests/ts-browser-webpack/.babelrc | 6 +- .../ts-browser-webpack/package-lock.json | 4516 +++++++++-------- .../ts-browser-webpack/package.json | 12 +- .../ts-browser-webpack/webpack.config.js | 12 +- 4 files changed, 2478 insertions(+), 2068 deletions(-) diff --git a/ecosystem-tests/ts-browser-webpack/.babelrc b/ecosystem-tests/ts-browser-webpack/.babelrc index c13c5f627..248fa61e3 100644 --- a/ecosystem-tests/ts-browser-webpack/.babelrc +++ b/ecosystem-tests/ts-browser-webpack/.babelrc @@ -1,3 +1,7 @@ { - "presets": ["es2015"] + "presets": [ + "@babel/preset-env", // Automatically determines the Babel plugins and polyfills you need based on your target environments + "@babel/preset-typescript" // If you're using TypeScript, this preset will enable TypeScript transformation + ], + "plugins": [] } diff --git a/ecosystem-tests/ts-browser-webpack/package-lock.json b/ecosystem-tests/ts-browser-webpack/package-lock.json index 686d0c2f9..695b85955 100644 --- a/ecosystem-tests/ts-browser-webpack/package-lock.json +++ b/ecosystem-tests/ts-browser-webpack/package-lock.json @@ -8,17 +8,17 @@ "name": "ts-browser-webpack", "version": "0.0.1", "devDependencies": { - "babel-core": "^6.26.3", + "@babel/core": "^7.21.0", + "@babel/preset-env": "^7.21.0", + "@babel/preset-typescript": "^7.21.0", "babel-loader": "^9.1.2", - "babel-preset-es2015": "^6.24.1", "fastest-levenshtein": "^1.0.16", - "force": "^0.0.3", "html-webpack-plugin": "^5.5.3", - "puppeteer": "^20.8.3", + "puppeteer": "^23.4.0", "start-server-and-test": "^2.0.0", "ts-loader": "^9.4.3", "ts-node": "^10.9.1", - "typescript": "4.7.4", + "typescript": "^4.7.4", "webpack": "^5.87.0", "webpack-cli": "^5.0.2", "webpack-dev-server": "^4.15.1" @@ -29,7 +29,6 @@ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", "dev": true, - "peer": true, "dependencies": { 
"@jridgewell/gen-mapping": "^0.3.0", "@jridgewell/trace-mapping": "^0.3.9" @@ -39,24 +38,23 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", "dev": true, "dependencies": { - "@babel/highlight": "^7.22.13", - "chalk": "^2.4.2" + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/compat-data": { - "version": "7.22.9", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.9.tgz", - "integrity": "sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ==", + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.25.4.tgz", + "integrity": "sha512-+LGRog6RAsCJrrrg/IO6LGmpphNe5DiK30dGjCoxxeGv49B10/3XYGxPsAwrDlMFcFEvdAUavDT8r9k/hSyQqQ==", "dev": true, - "peer": true, "engines": { "node": ">=6.9.0" } @@ -66,7 +64,6 @@ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.11.tgz", "integrity": "sha512-lh7RJrtPdhibbxndr6/xx0w8+CVlY5FJZiaSz908Fpy+G0xkBFTvwLcKJFF4PJxVfGhVWNebikpWGnOoC71juQ==", "dev": true, - "peer": true, "dependencies": { "@ampproject/remapping": "^2.2.0", "@babel/code-frame": "^7.22.10", @@ -93,31 +90,54 @@ } }, "node_modules/@babel/generator": { - "version": "7.22.10", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.10.tgz", - "integrity": "sha512-79KIf7YiWjjdZ81JnLujDRApWtl7BxTqWD88+FFdQEIOG8LJ0etDOM7CXuIgGJa55sGOwZVwuEsaLEm0PJ5/+A==", + "version": "7.25.6", + 
"resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.25.6.tgz", + "integrity": "sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==", "dev": true, - "peer": true, "dependencies": { - "@babel/types": "^7.22.10", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", + "@babel/types": "^7.25.6", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^2.5.1" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.24.7.tgz", + "integrity": "sha512-BaDeOonYvhdKw+JoMVkAixAAJzG2jVPIwWoKBPdYuY9b452e2rPuI9QPYh3KpofZ3pW2akOmwZLOiOsHMiqRAg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.24.7.tgz", + "integrity": "sha512-xZeCVVdwb4MsDBkkyZ64tReWYrLRHlMN72vP7Bdm3OUOuyFZExhsHUUnuWnm2/XOlAJzR0LfPpB56WXZn0X/lA==", + "dev": true, + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.22.10", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.10.tgz", - "integrity": "sha512-JMSwHD4J7SLod0idLq5PKgI+6g/hLD/iuWBq08ZX49xE14VpVEojJ5rHWptpirV2j020MvypRLAXAO50igCJ5Q==", + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.2.tgz", + "integrity": 
"sha512-U2U5LsSaZ7TAt3cfaymQ8WHh0pxvdHoEk6HVpaexxixjyEquMh0L0YNJNM6CTGKMXV1iksi0iZkGw4AcFkPaaw==", "dev": true, - "peer": true, "dependencies": { - "@babel/compat-data": "^7.22.9", - "@babel/helper-validator-option": "^7.22.5", - "browserslist": "^4.21.9", + "@babel/compat-data": "^7.25.2", + "@babel/helper-validator-option": "^7.24.8", + "browserslist": "^4.23.1", "lru-cache": "^5.1.1", "semver": "^6.3.1" }, @@ -125,68 +145,189 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/helper-environment-visitor": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz", - "integrity": "sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==", + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.25.4.tgz", + "integrity": "sha512-ro/bFs3/84MDgDmMwbcHgDa8/E6J3QKNTk4xJJnVeFtGE+tL0K26E3pNxhYz2b67fJpt7Aphw5XcploKXuCvCQ==", "dev": true, - "peer": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-member-expression-to-functions": "^7.24.8", + "@babel/helper-optimise-call-expression": "^7.24.7", + "@babel/helper-replace-supers": "^7.25.0", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/traverse": "^7.25.4", + "semver": "^6.3.1" + }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@babel/helper-function-name": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz", - "integrity": "sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==", + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.25.2", + "resolved": 
"https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.25.2.tgz", + "integrity": "sha512-+wqVGP+DFmqwFD3EH6TMTfUNeqDehV3E/dl+Sd54eaXqm17tEUNbEIn4sVivVowbvUpOtIGxdo3GoXyDH9N/9g==", "dev": true, - "peer": true, "dependencies": { - "@babel/template": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/helper-annotate-as-pure": "^7.24.7", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", - "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dev": true, + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/regjsparser": { + "version": "0.9.1", + "resolved": 
"https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dev": true, + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz", + "integrity": "sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.8.tgz", + "integrity": "sha512-LABppdt+Lp/RlBxqrh4qgf1oEH/WxdzQNDJIu5gC/W1GyvPVrOBiItmmM8wan2fm4oYqFuFfkXmlGpLQhPY8CA==", "dev": true, - "peer": true, "dependencies": { - "@babel/types": "^7.22.5" + "@babel/traverse": "^7.24.8", + "@babel/types": "^7.24.8" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", 
"dev": true, - "peer": true, "dependencies": { - "@babel/types": "^7.22.5" + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.22.9", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.9.tgz", - "integrity": "sha512-t+WA2Xn5K+rTeGtC8jCsdAH52bjggG5TKRuRrAGNM/mjIbO4GxvlLMFOEz9wXY5I2XQ60PMFsAG2WIcG82dQMQ==", + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.25.2.tgz", + "integrity": "sha512-BjyRAbix6j/wv83ftcVJmBt72QtHI56C7JXZoG2xATiLpmoC7dpd8WnkikExHDVPpi/3qCmO6WY1EaXOluiecQ==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", + "@babel/traverse": "^7.25.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.24.7.tgz", + "integrity": "sha512-jKiTsW2xmWwxT1ixIdfXUZp+P5yURx2suzLZr5Hi64rURpDYdMW0pv+Uf17EYk2Rd428Lx4tLsnjGJzYKDM/6A==", + "dev": true, + "dependencies": { + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", + "integrity": "sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.25.0", + "resolved": 
"https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.25.0.tgz", + "integrity": "sha512-NhavI2eWEIz/H9dbrG0TuOicDhNexze43i5z7lEqwYm0WEZVTwnPpA0EafUTP7+6/W79HWIP2cTe3Z5NiSTVpw==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-wrap-function": "^7.25.0", + "@babel/traverse": "^7.25.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.25.0.tgz", + "integrity": "sha512-q688zIvQVYtZu+i2PsdIu/uWGRpfxzr5WESsfpShfZECkO+d2o+WROWezCi/Q6kJ0tfPa5+pUGUlfx2HhrA3Bg==", "dev": true, - "peer": true, "dependencies": { - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-module-imports": "^7.22.5", - "@babel/helper-simple-access": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/helper-validator-identifier": "^7.22.5" + "@babel/helper-member-expression-to-functions": "^7.24.8", + "@babel/helper-optimise-call-expression": "^7.24.7", + "@babel/traverse": "^7.25.0" }, "engines": { "node": ">=6.9.0" @@ -196,56 +337,68 @@ } }, "node_modules/@babel/helper-simple-access": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", - "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", + "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", "dev": true, - "peer": true, "dependencies": { - "@babel/types": "^7.22.5" + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" }, "engines": 
{ "node": ">=6.9.0" } }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", - "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.24.7.tgz", + "integrity": "sha512-IO+DLT3LQUElMbpzlatRASEyQtfhSE0+m465v++3jyyXeBTBUjtVZg28/gHeV5mrTJqvEKhKroBGAvhW+qPHiQ==", "dev": true, - "peer": true, "dependencies": { - "@babel/types": "^7.22.5" + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", - "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.8.tgz", + "integrity": "sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==", "dev": true, - "peer": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz", - "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + 
"integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz", - "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==", + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.8.tgz", + "integrity": "sha512-xb8t9tD1MHLungh/AIoWYN+gVHaB9kwlu8gffXGSt3FFEIT7RjS+xWbc2vUD1UTZdIpKj/ab3rdqJ7ufngyi2Q==", "dev": true, - "peer": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.25.0.tgz", + "integrity": "sha512-s6Q1ebqutSiZnEjaofc/UKDyC4SbzV5n5SrA2Gq8UawLycr3i04f1dX4OzoQVnexm6aOCh37SQNYlJ/8Ku+PMQ==", + "dev": true, + "dependencies": { + "@babel/template": "^7.25.0", + "@babel/traverse": "^7.25.0", + "@babel/types": "^7.25.0" + }, "engines": { "node": ">=6.9.0" } @@ -255,7 +408,6 @@ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.11.tgz", "integrity": "sha512-vyOXC8PBWaGc5h7GMsNx68OH33cypkEDJCHvYVVgVbbxJDROYVtexSk0gK5iCF1xNjRIN2s8ai7hwkWDq5szWg==", "dev": true, - "peer": true, "dependencies": { "@babel/template": "^7.22.5", "@babel/traverse": "^7.22.11", @@ -266,25 +418,28 @@ } }, "node_modules/@babel/highlight": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.13.tgz", - "integrity": "sha512-C/BaXcnnvBCmHTpz/VGZ8jgtE2aYlW4hxDhseJAWZb7gqGM/qtCK6iZUb0TyKFf7BOUsBH7Q7fkRsDRhg1XklQ==", + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", "dev": true, "dependencies": { - "@babel/helper-validator-identifier": "^7.22.5", + "@babel/helper-validator-identifier": "^7.24.7", "chalk": "^2.4.2", - "js-tokens": "^4.0.0" + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { - "version": "7.22.14", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.14.tgz", - "integrity": "sha512-1KucTHgOvaw/LzCVrEOAyXkr9rQlp0A1HiHRYnSUE9dmb8PvPW7o5sscg+5169r54n3vGlbx6GevTE/Iw/P3AQ==", + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.25.6.tgz", + "integrity": "sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==", "dev": true, - "peer": true, + "dependencies": { + "@babel/types": "^7.25.6" + }, "bin": { "parser": "bin/babel-parser.js" }, @@ -292,1603 +447,2338 @@ "node": ">=6.0.0" } }, - "node_modules/@babel/template": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.5.tgz", - "integrity": "sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==", + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.25.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.25.3.tgz", + "integrity": "sha512-wUrcsxZg6rqBXG05HG1FPYgsP6EvwF4WpBbxIpWIIYnH8wG0gzx3yZY3dtEHas4sTAOGkbTsc9EGPxwff8lRoA==", "dev": true, - "peer": true, "dependencies": { - "@babel/code-frame": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/traverse": "^7.25.3" }, "engines": { 
"node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@babel/traverse": { - "version": "7.22.11", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.11.tgz", - "integrity": "sha512-mzAenteTfomcB7mfPtyi+4oe5BZ6MXxWcn4CX+h4IRJ+OOGXBrWU6jDQavkQI9Vuc5P+donFabBfFCcmWka9lQ==", + "node_modules/@babel/plugin-bugfix-safari-class-field-initializer-scope": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.25.0.tgz", + "integrity": "sha512-Bm4bH2qsX880b/3ziJ8KD711LT7z4u8CFudmjqle65AZj/HNUFhEf90dqYv6O86buWvSBmeQDjv0Tn2aF/bIBA==", "dev": true, - "peer": true, "dependencies": { - "@babel/code-frame": "^7.22.10", - "@babel/generator": "^7.22.10", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.22.11", - "@babel/types": "^7.22.11", - "debug": "^4.1.0", - "globals": "^11.1.0" + "@babel/helper-plugin-utils": "^7.24.8" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@babel/types": { - "version": "7.22.11", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.11.tgz", - "integrity": "sha512-siazHiGuZRz9aB9NpHy9GOs9xiQPKnMzgdr493iI1M67vRXpnEq8ZOOKzezC5q7zwuQ6sDhdSp4SD9ixKSqKZg==", + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.25.0.tgz", + "integrity": "sha512-lXwdNZtTmeVOOFtwM/WDe7yg1PL8sYhRk/XH0FzbR2HDQ0xC+EnQ/JHeoMYSavtU115tnUk0q9CDyq8si+LMAA==", "dev": 
true, - "peer": true, "dependencies": { - "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5", - "to-fast-properties": "^2.0.0" + "@babel/helper-plugin-utils": "^7.24.8" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@cspotcode/source-map-support": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", - "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.7.tgz", + "integrity": "sha512-+izXIbke1T33mY4MSNnrqhPXDz01WYhEf3yF5NbnUtkiNnm+XBZJl3kNfoK6NKmYlz/D07+l2GWVK/QfDkNCuQ==", "dev": true, "dependencies": { - "@jridgewell/trace-mapping": "0.3.9" + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.7" }, "engines": { - "node": ">=12" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" } }, - "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { - "version": "0.3.9", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", - "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.25.0.tgz", + "integrity": 
"sha512-tggFrk1AIShG/RUQbEwt2Tr/E+ObkfwrPjR6BjbRvsx24+PSjK8zrq0GWPNCjo8qpRx4DuJzlcvWJqlm+0h3kw==", "dev": true, "dependencies": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/traverse": "^7.25.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@discoveryjs/json-ext": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", - "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", "dev": true, "engines": { - "node": ">=10.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@hapi/hoek": { - "version": "9.3.0", - "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", - "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", - "dev": true - }, - "node_modules/@hapi/topo": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", - "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": 
"sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", "dev": true, "dependencies": { - "@hapi/hoek": "^9.0.0" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", "dev": true, "dependencies": { - "@jridgewell/set-array": "^1.0.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@babel/helper-plugin-utils": "^7.12.13" }, - "engines": { - "node": ">=6.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", - "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, "engines": { - "node": ">=6.0.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - 
"node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", "dev": true, - "engines": { - "node": ">=6.0.0" + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@jridgewell/source-map": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.5.tgz", - "integrity": "sha512-UTYAUj/wviwdsMfzoSJspJxbkH5o1snzwX0//0ENX1u/55kkZZkcTZP6u9bwKGkv+dkk9at4m1Cpt0uY80kcpQ==", + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", "dev": true, "dependencies": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", - "dev": true - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.19", - "resolved": 
"https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", - "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.25.6.tgz", + "integrity": "sha512-aABl0jHw9bZ2karQ/uUD6XP4u0SG22SJrOHFoL6XB1R7dTovOP4TzTlsxOYC5yQ1pdscVK2JTUnF6QL3ARoAiQ==", "dev": true, "dependencies": { - "@jridgewell/resolve-uri": "^3.1.0", - "@jridgewell/sourcemap-codec": "^1.4.14" + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@leichtgewicht/ip-codec": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", - "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==", - "dev": true - }, - "node_modules/@puppeteer/browsers": { - "version": "1.4.6", - "resolved": "https://registry.npmjs.org/@puppeteer/browsers/-/browsers-1.4.6.tgz", - "integrity": "sha512-x4BEjr2SjOPowNeiguzjozQbsc6h437ovD/wu+JpaenxVLm3jkgzHY2xOslMTp50HoTvQreMjiexiGQw1sqZlQ==", + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.25.6.tgz", + "integrity": "sha512-sXaDXaJN9SNLymBdlWFA+bjzBhFD617ZaFiY13dGt7TVslVvVgA6fkZOP7Ki3IGElC45lwHdOTrCtKZGVAWeLQ==", "dev": true, "dependencies": { - "debug": "4.3.4", - "extract-zip": "2.0.1", - "progress": "2.0.3", - "proxy-agent": "6.3.0", - "tar-fs": "3.0.4", - "unbzip2-stream": "1.4.3", - "yargs": "17.7.1" - }, - "bin": { - "browsers": "lib/cjs/main-cli.js" + "@babel/helper-plugin-utils": "^7.24.8" }, "engines": 
{ - "node": ">=16.3.0" + "node": ">=6.9.0" }, "peerDependencies": { - "typescript": ">= 4.7.4" + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@sideway/address": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", - "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", "dev": true, "dependencies": { - "@hapi/hoek": "^9.0.0" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@sideway/formula": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", - "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==", - "dev": true - }, - "node_modules/@sideway/pinpoint": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", - "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==", - "dev": true - }, - 
"node_modules/@tootallnate/quickjs-emscripten": { - "version": "0.23.0", - "resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz", - "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==", - "dev": true - }, - "node_modules/@tsconfig/node10": { - "version": "1.0.9", - "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz", - "integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==", - "dev": true - }, - "node_modules/@tsconfig/node12": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", - "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", - "dev": true - }, - "node_modules/@tsconfig/node14": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", - "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", - "dev": true - }, - "node_modules/@tsconfig/node16": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", - "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", - "dev": true - }, - "node_modules/@types/body-parser": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", - "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.7.tgz", + "integrity": 
"sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==", "dev": true, "dependencies": { - "@types/connect": "*", - "@types/node": "*" + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/bonjour": { - "version": "3.5.10", - "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", - "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", "dev": true, "dependencies": { - "@types/node": "*" + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/connect": { - "version": "3.4.35", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", - "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", "dev": true, "dependencies": { - "@types/node": "*" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/connect-history-api-fallback": { - "version": "1.5.0", - "resolved": 
"https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz", - "integrity": "sha512-4x5FkPpLipqwthjPsF7ZRbOv3uoLUFkTA9G9v583qi4pACvq0uTELrB8OLUzPWUI4IJIyvM85vzkV1nyiI2Lig==", + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", "dev": true, "dependencies": { - "@types/express-serve-static-core": "*", - "@types/node": "*" + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/eslint": { - "version": "8.44.2", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.44.2.tgz", - "integrity": "sha512-sdPRb9K6iL5XZOmBubg8yiFp5yS/JdUDQsq5e6h95km91MCYMuvp7mh1fjPEYUhvHepKpZOjnEaMBR4PxjWDzg==", + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", "dev": true, "dependencies": { - "@types/estree": "*", - "@types/json-schema": "*" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/eslint-scope": { - "version": "3.7.4", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.4.tgz", - "integrity": "sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==", + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", "dev": true, "dependencies": { - "@types/eslint": "*", - "@types/estree": "*" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/estree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", - "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==", - "dev": true - }, - "node_modules/@types/express": { - "version": "4.17.17", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.17.tgz", - "integrity": "sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==", + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", "dev": true, "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.33", - "@types/qs": "*", - "@types/serve-static": "*" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/express-serve-static-core": { - "version": "4.17.36", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.36.tgz", - "integrity": "sha512-zbivROJ0ZqLAtMzgzIUC4oNqDG9iF0lSsAqpOD9kbs5xcIM3dTiyuHvBc7R8MtWBp3AAWGaovJa+wzWPjLYW7Q==", + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", "dev": true, "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*", - "@types/send": "*" + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/html-minifier-terser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==", - "dev": true - }, - "node_modules/@types/http-errors": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.1.tgz", - "integrity": "sha512-/K3ds8TRAfBvi5vfjuz8y6+GiAYBZ0x4tXv1Av6CWBWn0IlADc+ZX9pMq7oU0fNQPnBwIZl3rmeLp6SBApbxSQ==", - "dev": true - }, - "node_modules/@types/http-proxy": { - "version": "1.17.11", - "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.11.tgz", - "integrity": "sha512-HC8G7c1WmaF2ekqpnFq626xd3Zz0uvaqFmBJNRZCGEZCXkvSdJoNFn/8Ygbd9fKNQj8UzLdCETaI0UWPAjK7IA==", + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", "dev": true, "dependencies": { - "@types/node": "*" + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/json-schema": { - "version": "7.0.12", - 
"resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", - "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==", - "dev": true - }, - "node_modules/@types/mime": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", - "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==", - "dev": true - }, - "node_modules/@types/node": { - "version": "20.5.7", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz", - "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==", - "dev": true - }, - "node_modules/@types/qs": { - "version": "6.9.8", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.8.tgz", - "integrity": "sha512-u95svzDlTysU5xecFNTgfFG5RUWu1A9P0VzgpcIiGZA9iraHOdSzcxMxQ55DyeRaGCSxQi7LxXDI4rzq/MYfdg==", - "dev": true - }, - "node_modules/@types/range-parser": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", - "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==", - "dev": true - }, - "node_modules/@types/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", - "dev": true - }, - "node_modules/@types/send": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.1.tgz", - "integrity": "sha512-Cwo8LE/0rnvX7kIIa3QHCkcuF21c05Ayb0ZfxPiv0W8VRiZiNW/WuRupHKpqqGVGf7SUA44QSOUKaEd9lIrd/Q==", + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.25.4", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.4.tgz", + "integrity": "sha512-uMOCoHVU52BsSWxPOMVv5qKRdeSlPuImUCB2dlPuBSU+W2/ROE7/Zg8F2Kepbk+8yBa68LlRKxO+xgEVWorsDg==", "dev": true, "dependencies": { - "@types/mime": "^1", - "@types/node": "*" + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/serve-index": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", - "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", "dev": true, "dependencies": { - "@types/express": "*" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@types/serve-static": { - "version": "1.15.2", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.2.tgz", - "integrity": "sha512-J2LqtvFYCzaj8pVYKw8klQXrLLk7TBZmQ4ShlcdkELFKGwGMfevMLneMMRkMgZxotOD9wg497LpC7O8PcvAmfw==", + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.7.tgz", + "integrity": "sha512-Dt9LQs6iEY++gXUwY03DNFat5C2NbO48jj+j/bSAz6b3HgPs39qcPiYt77fDObIcFwj3/C2ICX9YMwGflUoSHQ==", "dev": true, "dependencies": { - "@types/http-errors": "*", - "@types/mime": "*", 
- "@types/node": "*" + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/sockjs": { - "version": "0.3.33", - "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", - "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.25.4.tgz", + "integrity": "sha512-jz8cV2XDDTqjKPwVPJBIjORVEmSGYhdRa8e5k5+vN+uwcjSrSxUaebBRa4ko1jqNF2uxyg8G6XYk30Jv285xzg==", "dev": true, "dependencies": { - "@types/node": "*" + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/helper-remap-async-to-generator": "^7.25.0", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/traverse": "^7.25.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@types/ws": { - "version": "8.5.5", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz", - "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==", + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.7.tgz", + "integrity": "sha512-SQY01PcJfmQ+4Ash7NE+rpbLFbmqA2GPIgqzxfFTL4t1FKRq4zTms/7htKpoCUI9OcFYgzqfmCdH53s6/jn5fA==", "dev": true, "dependencies": { - "@types/node": "*" + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-remap-async-to-generator": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": 
"^7.0.0-0" } }, - "node_modules/@types/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-Cn6WYCm0tXv8p6k+A8PvbDG763EDpBoTzHdA+Q/MF6H3sapGjCm9NzoaJncJS9tUKSuCoDs9XHxYYsQDgxR6kw==", + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.7.tgz", + "integrity": "sha512-yO7RAz6EsVQDaBH18IDJcMB1HnrUn2FJ/Jslc/WtPPWcjhpUJXU/rjbwmluzp7v/ZzWcEhTMXELnnsz8djWDwQ==", "dev": true, - "optional": true, "dependencies": { - "@types/node": "*" + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webassemblyjs/ast": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz", - "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.25.0.tgz", + "integrity": "sha512-yBQjYoOjXlFv9nlXb3f1casSHOZkWr29NX+zChVanLg5Nc157CrbEX9D7hxxtTpuFy7Q0YzmmWfJxzvps4kXrQ==", "dev": true, "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", - "integrity": 
"sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==", - "dev": true - }, - "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", - "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==", - "dev": true - }, - "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz", - "integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==", - "dev": true - }, - "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", - "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.25.4.tgz", + "integrity": "sha512-nZeZHyCWPfjkdU5pA/uHiTaDAFUEqkpzf1YoQT2NeSynCGYq9rxfyI3XpQbfx/a0hSnFH6TGlEXvae5Vi7GD8g==", "dev": true, "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", - "@xtuc/long": "4.2.2" + "@babel/helper-create-class-features-plugin": "^7.25.4", + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", - "integrity": 
"sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==", - "dev": true - }, - "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz", - "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.7.tgz", + "integrity": "sha512-HMXK3WbBPpZQufbMG4B46A90PkuuhN9vBCb5T8+VAHqvAqvcLi+2cKoukcpmUYkszLhScU3l1iudhrks3DggRQ==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6" + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" } }, - "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", - "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "node_modules/@babel/plugin-transform-classes": { + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.25.4.tgz", + "integrity": "sha512-oexUfaQle2pF/b6E0dwsxQtAol9TLSO88kQvym6HHBWFliV2lGdrPieX+WgMRLSJDVzdYywk7jXbLPuO2KLTLg==", "dev": true, "dependencies": { - "@xtuc/ieee754": "^1.2.0" + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-compilation-targets": "^7.25.2", + 
"@babel/helper-plugin-utils": "^7.24.8", + "@babel/helper-replace-supers": "^7.25.0", + "@babel/traverse": "^7.25.4", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webassemblyjs/leb128": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", - "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.7.tgz", + "integrity": "sha512-25cS7v+707Gu6Ds2oY6tCkUwsJ9YIDbggd9+cu9jzzDgiNq7hR/8dkzxWfKWnTic26vsI3EsCXNd4iEB6e8esQ==", "dev": true, "dependencies": { - "@xtuc/long": "4.2.2" + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/template": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webassemblyjs/utf8": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", - "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==", - "dev": true - }, - "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz", - "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.8.tgz", + "integrity": "sha512-36e87mfY8TnRxc7yc6M9g9gOB7rKgSahqkIKwLpz4Ppk2+zC2Cy1is0uwtuSG6AE4zlTOUa+7JGz9jCJGLqQFQ==", 
"dev": true, "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/helper-wasm-section": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6", - "@webassemblyjs/wasm-opt": "1.11.6", - "@webassemblyjs/wasm-parser": "1.11.6", - "@webassemblyjs/wast-printer": "1.11.6" + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz", - "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.7.tgz", + "integrity": "sha512-ZOA3W+1RRTSWvyqcMJDLqbchh7U4NRGqwRfFSVbOLS/ePIP4vHB5e8T8eXcuqyN1QkgKyj5wuW0lcS85v4CrSw==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz", - "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.7.tgz", + "integrity": "sha512-JdYfXyCRihAe46jUIliuL2/s0x0wObgwwiGxw/UbgJBr20gQBThrokO4nYKgWkD7uBaqM7+9x5TU7NkExZJyzw==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6", - "@webassemblyjs/wasm-parser": "1.11.6" + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz", - "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "node_modules/@babel/plugin-transform-duplicate-named-capturing-groups-regex": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.25.0.tgz", + "integrity": "sha512-YLpb4LlYSc3sCUa35un84poXoraOiQucUTTu8X1j18JV+gNa8E0nyUf/CjZ171IRGr4jEguF+vzJU66QZhn29g==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" + "@babel/helper-create-regexp-features-plugin": "^7.25.0", + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@webassemblyjs/wast-printer": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz", - "integrity": 
"sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.7.tgz", + "integrity": "sha512-sc3X26PhZQDb3JhORmakcbvkeInvxz+A8oda99lj7J60QRuPZvNAk9wQlTBS1ZynelDrDmTU4pw1tyc5d5ZMUg==", "dev": true, "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@xtuc/long": "4.2.2" + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webpack-cli/configtest": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@webpack-cli/configtest/-/configtest-2.1.1.tgz", - "integrity": "sha512-wy0mglZpDSiSS0XHrVR+BAdId2+yxPSoJW8fsna3ZpYSlufjvxnP4YbKTCBZnNIcGN4r6ZPXV55X4mYExOfLmw==", + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.7.tgz", + "integrity": "sha512-Rqe/vSc9OYgDajNIK35u7ot+KeCoetqQYFXM4Epf7M7ez3lWlOjrDjrwMei6caCVhfdw+mIKD4cgdGNy5JQotQ==", "dev": true, + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, "engines": { - "node": ">=14.15.0" + "node": ">=6.9.0" }, "peerDependencies": { - "webpack": "5.x.x", - "webpack-cli": "5.x.x" + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webpack-cli/info": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@webpack-cli/info/-/info-2.0.2.tgz", - "integrity": "sha512-zLHQdI/Qs1UyT5UBdWNqsARasIA+AaF8t+4u2aS2nEpBQh2mWIVb8qAklq0eUENnC5mOItrIB4LiS9xMtph18A==", + "node_modules/@babel/plugin-transform-export-namespace-from": { + 
"version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.7.tgz", + "integrity": "sha512-v0K9uNYsPL3oXZ/7F9NNIbAj2jv1whUEtyA6aujhekLs56R++JDQuzRcP2/z4WX5Vg/c5lE9uWZA0/iUoFhLTA==", "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, "engines": { - "node": ">=14.15.0" + "node": ">=6.9.0" }, "peerDependencies": { - "webpack": "5.x.x", - "webpack-cli": "5.x.x" + "@babel/core": "^7.0.0-0" } }, - "node_modules/@webpack-cli/serve": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@webpack-cli/serve/-/serve-2.0.5.tgz", - "integrity": "sha512-lqaoKnRYBdo1UgDX8uF24AfGMifWK19TxPmM5FHc2vAGxrJ/qtyUyFBWoY1tISZdelsQ5fBcOusifo5o5wSJxQ==", + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.7.tgz", + "integrity": "sha512-wo9ogrDG1ITTTBsy46oGiN1dS9A7MROBTcYsfS8DtsImMkHk9JXJ3EWQM6X2SUw4x80uGPlwj0o00Uoc6nEE3g==", "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, "engines": { - "node": ">=14.15.0" + "node": ">=6.9.0" }, "peerDependencies": { - "webpack": "5.x.x", - "webpack-cli": "5.x.x" - }, - "peerDependenciesMeta": { - "webpack-dev-server": { - "optional": true - } + "@babel/core": "^7.0.0-0" } }, - "node_modules/@xtuc/ieee754": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", - "dev": true - }, - "node_modules/@xtuc/long": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": 
"sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", - "dev": true - }, - "node_modules/accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.25.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.25.1.tgz", + "integrity": "sha512-TVVJVdW9RKMNgJJlLtHsKDTydjZAbwIsn6ySBPQaEAUU5+gVvlJt/9nRmqVbsV/IBanRjzWoaAQKLoamWVOUuA==", "dev": true, "dependencies": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" + "@babel/helper-compilation-targets": "^7.24.8", + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/traverse": "^7.25.1" }, "engines": { - "node": ">= 0.6" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.7.tgz", + "integrity": "sha512-2yFnBGDvRuxAaE/f0vfBKvtnvvqU8tGpMHqMNpTN2oWMKIR3NqFkjaAgGwawhqK/pIN2T3XdjGPdaG0vDhOBGw==", "dev": true, - "bin": { - "acorn": "bin/acorn" + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-json-strings": "^7.8.3" }, "engines": { - "node": ">=0.4.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/acorn-import-assertions": { - "version": "1.9.0", - "resolved": 
"https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", - "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "node_modules/@babel/plugin-transform-literals": { + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.25.2.tgz", + "integrity": "sha512-HQI+HcTbm9ur3Z2DkO+jgESMAMcYLuN/A7NRw9juzxAezN9AvqvUTnpKP/9kkYANz6u7dFlAyOu44ejuGySlfw==", "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, "peerDependencies": { - "acorn": "^8" + "@babel/core": "^7.0.0-0" } }, - "node_modules/acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.7.tgz", + "integrity": "sha512-4D2tpwlQ1odXmTEIFWy9ELJcZHqrStlzK/dAOWYyxX3zT0iXQB6banjgeOJQXzEc4S0E0a5A+hahxPaEFYftsw==", "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, "engines": { - "node": ">=0.4.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/agent-base": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz", - "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==", + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.7.tgz", + "integrity": "sha512-T/hRC1uqrzXMKLQ6UCwMT85S3EvqaBXDGf0FaMf4446Qx9vKwlghvee0+uuZcDUCZU5RuNi4781UQ7R308zzBw==", "dev": true, "dependencies": { - "debug": "^4.3.4" + "@babel/helper-plugin-utils": "^7.24.7" }, "engines": { - "node": ">= 14" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.7.tgz", + "integrity": "sha512-9+pB1qxV3vs/8Hdmz/CulFB8w2tuu6EB94JZFsjdqxQokwGa9Unap7Bo2gGBGIvPmDIVvQrom7r5m/TCDMURhg==", "dev": true, "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/ajv-formats": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", - "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.8.tgz", + "integrity": 
"sha512-WHsk9H8XxRs3JXKWFiqtQebdh9b/pTk4EgueygFzYlTKAg0Ud985mSevdNjdXdFBATSKVJGQXP1tv6aGbssLKA==", "dev": true, "dependencies": { - "ajv": "^8.0.0" + "@babel/helper-module-transforms": "^7.24.8", + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/helper-simple-access": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { - "ajv": "^8.0.0" + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.25.0.tgz", + "integrity": "sha512-YPJfjQPDXxyQWg/0+jHKj1llnY5f/R6a0p/vP4lPymxLu7Lvl4k2WMitqi08yxwQcCVUUdG9LCUj4TNEgAp3Jw==", + "dev": true, + "dependencies": { + "@babel/helper-module-transforms": "^7.25.0", + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/helper-validator-identifier": "^7.24.7", + "@babel/traverse": "^7.25.0" }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/ajv-formats/node_modules/ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.7.tgz", + "integrity": "sha512-3aytQvqJ/h9z4g8AsKPLvD4Zqi2qT+L3j7XoFFu1XBlZWEl2/1kWnhmAbxpLgPrHSY0M6UA02jyTiwUVtiKR6A==", "dev": true, "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" + "@babel/helper-module-transforms": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" }, - "funding": { - "type": "github", - "url": 
"https://github.com/sponsors/epoberezkin" + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/ajv-formats/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.24.7.tgz", + "integrity": "sha512-/jr7h/EWeJtk1U/uz2jlsCioHkZk1JJZVcc8oQsJ1dUlaJD83f4/6Zeh2aHt9BIFokHIsSeDfhUmju0+1GPd6g==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } }, - "node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.7.tgz", + "integrity": "sha512-RNKwfRIXg4Ls/8mMTza5oPF5RkOW8Wy/WgMAp1/F1yZ8mMbtwXW+HDoJiOsagWrAhI5f57Vncrmr9XeT4CVapA==", "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, "peerDependencies": { - "ajv": "^6.9.1" + "@babel/core": "^7.0.0-0" } }, - "node_modules/ansi-html-community": { - "version": "0.0.8", - "resolved": 
"https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", - "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.7.tgz", + "integrity": "sha512-Ts7xQVk1OEocqzm8rHMXHlxvsfZ0cEF2yomUqpKENHWMF4zKk175Y4q8H5knJes6PgYad50uuRmt3UJuhBw8pQ==", "dev": true, - "engines": [ - "node >= 0.8.0" - ], - "bin": { - "ansi-html": "bin/ansi-html" + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.7.tgz", + "integrity": "sha512-e6q1TiVUzvH9KRvicuxdBTUj4AdKSRwzIyFFnfnezpCfP2/7Qmbb8qbU2j7GODbl4JMkblitCQjKYUaX/qkkwA==", "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, "engines": { - "node": ">=8" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + 
"node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.7.tgz", + "integrity": "sha512-4QrHAr0aXQCEFni2q4DqKLD31n2DL+RxcwnNjDFkSG0eNQ/xCavnRkfCUjsyqGC2OviNJvZOF/mQqZBw7i2C5Q==", "dev": true, "dependencies": { - "color-convert": "^1.9.0" + "@babel/helper-compilation-targets": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.24.7" }, "engines": { - "node": ">=4" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.7.tgz", + "integrity": "sha512-A/vVLwN6lBrMFmMDmPPz0jnE6ZGx7Jq7d6sT/Ev4H65RER6pZ+kczlf1DthF5N0qaPHBsI7UXiE8Zy66nmAovg==", "dev": true, "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-replace-supers": "^7.24.7" }, "engines": { - "node": ">= 8" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/arg": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", - "dev": true + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.7.tgz", + "integrity": "sha512-uLEndKqP5BfBbC/5jTwPxLh9kqPWWgzN/f8w6UwAIirAEqiIVJWWY312X72Eub09g5KF9+Zn7+hT7sDxmhRuKA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.8.tgz", + "integrity": "sha512-5cTOLSMs9eypEy8JUVvIKOu6NgvbJMnpG62VpIHrTmROdQ+L5mDAaI40g25k5vXti55JWNX5jCkq3HZxXBQANw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } }, - "node_modules/array-flatten": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==", - "dev": true + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.7.tgz", + "integrity": "sha512-yGWW5Rr+sQOhK0Ot8hjDJuxU3XLRQGflvT4lhlSY0DFvdb3TwKaY26CJzHtYllU0vT9j58hc37ndFPsqT1SrzA==", + 
"dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } }, - "node_modules/asn1": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", - "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.25.4.tgz", + "integrity": "sha512-ao8BG7E2b/URaUQGqN3Tlsg+M3KlHY6rJ1O1gXAEUnZoyNQnvKyH87Kfg+FoxSeyWUB8ISZZsC91C44ZuBFytw==", "dev": true, "dependencies": { - "safer-buffer": "~2.1.0" + "@babel/helper-create-class-features-plugin": "^7.25.4", + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==", + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.7.tgz", + "integrity": "sha512-9z76mxwnwFxMyxZWEgdgECQglF2Q7cFLm0kMf8pGwt+GSJsY0cONKj/UuO4bOH0w/uAel3ekS4ra5CEAyJRmDA==", "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, "engines": { - "node": ">=0.8" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } 
}, - "node_modules/ast-types": { - "version": "0.13.4", - "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.13.4.tgz", - "integrity": "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==", + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.7.tgz", + "integrity": "sha512-EMi4MLQSHfd2nrCqQEWxFdha2gBCqU4ZcCng4WBGZ5CJL4bBRW0ptdqqDdeirGZcpALazVVNJqRmsO8/+oNCBA==", "dev": true, "dependencies": { - "tslib": "^2.0.1" + "@babel/helper-plugin-utils": "^7.24.7" }, "engines": { - "node": ">=4" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.7.tgz", + "integrity": "sha512-lq3fvXPdimDrlg6LWBoqj+r/DEWgONuwjuOuQCSYgRroXDH/IdM1C0IZf59fL5cHLpjEH/O6opIRBbqv7ELnuA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator/node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dev": true, + "dependencies": { + "@babel/runtime": "^7.8.4" 
+ } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.7.tgz", + "integrity": "sha512-0DUq0pHcPKbjFZCfTss/pGkYMfy3vFWydkUBd9r0GHpIyfs2eCDENvqadMycRS9wZCXR41wucAfJHJmwA0UmoQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.7.tgz", + "integrity": "sha512-KsDsevZMDsigzbA09+vacnLpmPH4aWjcZjXdyFKGzpplxhbeB4wYtury3vglQkg6KM/xEPKt73eCjPPf1PgXBA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.7.tgz", + "integrity": "sha512-x96oO0I09dgMDxJaANcRyD4ellXFLLiWhuwDxKZX5g2rWP1bTPkBSwCYv96VDXVT1bD9aPj8tppr5ITIh8hBng==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.7.tgz", + "integrity": "sha512-kHPSIJc9v24zEml5geKg9Mjx5ULpfncj0wRpYtxbvKyTtHCYDkVE3aHQ03FrpEo4gEe2vrJJS1Y9CJTaThA52g==", + "dev": true, + "dependencies": { + 
"@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.7.tgz", + "integrity": "sha512-AfDTQmClklHCOLxtGoP7HkeMw56k1/bTQjwsfhL6pppo/M4TOBSq+jjBUBLmV/4oeFg4GWMavIl44ZeCtmmZTw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.8.tgz", + "integrity": "sha512-adNTUpDCVnmAE58VEqKlAA6ZBlNkMnWD0ZcW76lyNFN3MJniyGFZfNwERVk8Ap56MCnXztmDr19T4mPTztcuaw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.25.2.tgz", + "integrity": "sha512-lBwRvjSmqiMYe/pS0+1gggjJleUJi7NzjvQ1Fkqtt69hBa/0t1YuW/MLQMAPixfwaQOHUXsd6jeU3Z+vdGv3+A==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.24.7", + "@babel/helper-create-class-features-plugin": "^7.25.0", + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/helper-skip-transparent-expression-wrappers": "^7.24.7", + "@babel/plugin-syntax-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.24.7", + 
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.7.tgz", + "integrity": "sha512-U3ap1gm5+4edc2Q/P+9VrBNhGkfnf+8ZqppY71Bo/pzZmXhhLdqgaUl6cuB07O1+AQJtCLfaOmswiNbSQ9ivhw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.7.tgz", + "integrity": "sha512-uH2O4OV5M9FZYQrwc7NdVmMxQJOCCzFeYudlZSzUAHRFeOujQefa92E74TQDVskNHCzOXoigEuoyzHDhaEaK5w==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.7.tgz", + "integrity": "sha512-hlQ96MBZSAXUq7ltkjtu3FJCCSMx/j629ns3hA3pXnBXjanNP0LHi+JpPeA81zaWgVK1VGH95Xuy7u0RyQ8kMg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.24.7", + "@babel/helper-plugin-utils": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.25.4.tgz", + "integrity": "sha512-qesBxiWkgN1Q+31xUE9RcMk79eOXXDCv6tfyGMRSs4RGlioSg2WVyQAm07k726cSE56pa+Kb0y9epX2qaXzTvA==", + "dev": true, + "dependencies": { + 
"@babel/helper-create-regexp-features-plugin": "^7.25.2", + "@babel/helper-plugin-utils": "^7.24.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.25.4", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.25.4.tgz", + "integrity": "sha512-W9Gyo+KmcxjGahtt3t9fb14vFRWvPpu5pT6GBlovAK6BTBcxgjfVMSQCfJl4oi35ODrxP6xx2Wr8LNST57Mraw==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.25.4", + "@babel/helper-compilation-targets": "^7.25.2", + "@babel/helper-plugin-utils": "^7.24.8", + "@babel/helper-validator-option": "^7.24.8", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.25.3", + "@babel/plugin-bugfix-safari-class-field-initializer-scope": "^7.25.0", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.25.0", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.24.7", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.25.0", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.24.7", + "@babel/plugin-syntax-import-attributes": "^7.24.7", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": 
"^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.24.7", + "@babel/plugin-transform-async-generator-functions": "^7.25.4", + "@babel/plugin-transform-async-to-generator": "^7.24.7", + "@babel/plugin-transform-block-scoped-functions": "^7.24.7", + "@babel/plugin-transform-block-scoping": "^7.25.0", + "@babel/plugin-transform-class-properties": "^7.25.4", + "@babel/plugin-transform-class-static-block": "^7.24.7", + "@babel/plugin-transform-classes": "^7.25.4", + "@babel/plugin-transform-computed-properties": "^7.24.7", + "@babel/plugin-transform-destructuring": "^7.24.8", + "@babel/plugin-transform-dotall-regex": "^7.24.7", + "@babel/plugin-transform-duplicate-keys": "^7.24.7", + "@babel/plugin-transform-duplicate-named-capturing-groups-regex": "^7.25.0", + "@babel/plugin-transform-dynamic-import": "^7.24.7", + "@babel/plugin-transform-exponentiation-operator": "^7.24.7", + "@babel/plugin-transform-export-namespace-from": "^7.24.7", + "@babel/plugin-transform-for-of": "^7.24.7", + "@babel/plugin-transform-function-name": "^7.25.1", + "@babel/plugin-transform-json-strings": "^7.24.7", + "@babel/plugin-transform-literals": "^7.25.2", + "@babel/plugin-transform-logical-assignment-operators": "^7.24.7", + "@babel/plugin-transform-member-expression-literals": "^7.24.7", + "@babel/plugin-transform-modules-amd": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.8", + "@babel/plugin-transform-modules-systemjs": "^7.25.0", + "@babel/plugin-transform-modules-umd": "^7.24.7", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.24.7", + "@babel/plugin-transform-new-target": "^7.24.7", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.24.7", + "@babel/plugin-transform-numeric-separator": "^7.24.7", + "@babel/plugin-transform-object-rest-spread": "^7.24.7", + 
"@babel/plugin-transform-object-super": "^7.24.7", + "@babel/plugin-transform-optional-catch-binding": "^7.24.7", + "@babel/plugin-transform-optional-chaining": "^7.24.8", + "@babel/plugin-transform-parameters": "^7.24.7", + "@babel/plugin-transform-private-methods": "^7.25.4", + "@babel/plugin-transform-private-property-in-object": "^7.24.7", + "@babel/plugin-transform-property-literals": "^7.24.7", + "@babel/plugin-transform-regenerator": "^7.24.7", + "@babel/plugin-transform-reserved-words": "^7.24.7", + "@babel/plugin-transform-shorthand-properties": "^7.24.7", + "@babel/plugin-transform-spread": "^7.24.7", + "@babel/plugin-transform-sticky-regex": "^7.24.7", + "@babel/plugin-transform-template-literals": "^7.24.7", + "@babel/plugin-transform-typeof-symbol": "^7.24.8", + "@babel/plugin-transform-unicode-escapes": "^7.24.7", + "@babel/plugin-transform-unicode-property-regex": "^7.24.7", + "@babel/plugin-transform-unicode-regex": "^7.24.7", + "@babel/plugin-transform-unicode-sets-regex": "^7.25.4", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.6", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.37.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.24.7", + "resolved": 
"https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.24.7.tgz", + "integrity": "sha512-SyXRe3OdWwIwalxDg5UtJnJQO+YPcTfwiIY2B0Xlddh9o7jpWLvv8X1RthIeDOxQ+O1ML5BLPCONToObyVQVuQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7", + "@babel/helper-validator-option": "^7.24.7", + "@babel/plugin-syntax-jsx": "^7.24.7", + "@babel/plugin-transform-modules-commonjs": "^7.24.7", + "@babel/plugin-transform-typescript": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==", + "dev": true + }, + "node_modules/@babel/runtime": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.25.6.tgz", + "integrity": "sha512-VBj9MYyDb9tuLq7yzqjgzt6Q+IBQLrGZfdjOekyEirZPHxXWoTSGUTMrpsfi58Up73d13NfYLv8HT9vmznjzhQ==", + "dev": true, + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime/node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", + "dev": true + }, + "node_modules/@babel/template": { + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.0.tgz", + "integrity": "sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.25.0", + "@babel/types": "^7.25.0" + }, + "engines": { + "node": 
">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.6.tgz", + "integrity": "sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.25.6", + "@babel/parser": "^7.25.6", + "@babel/template": "^7.25.0", + "@babel/types": "^7.25.6", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.25.6", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.25.6.tgz", + "integrity": "sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.24.8", + "@babel/helper-validator-identifier": "^7.24.7", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": 
"https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "dev": true, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", + "dev": true + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "dev": true, + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", + "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + 
}, + "node_modules/@jridgewell/source-map": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.5.tgz", + "integrity": "sha512-UTYAUj/wviwdsMfzoSJspJxbkH5o1snzwX0//0ENX1u/55kkZZkcTZP6u9bwKGkv+dkk9at4m1Cpt0uY80kcpQ==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", + "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==", + "dev": true + }, + "node_modules/@puppeteer/browsers": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/@puppeteer/browsers/-/browsers-2.4.0.tgz", + "integrity": "sha512-x8J1csfIygOwf6D6qUAZ0ASk3z63zPb7wkNeHRerCMh82qWKUrOgkuP005AJC8lDL6/evtXETGEJVcwykKT4/g==", + "dev": true, + "dependencies": { + "debug": "^4.3.6", + "extract-zip": "^2.0.1", + "progress": "^2.0.3", + "proxy-agent": "^6.4.0", + "semver": "^7.6.3", + "tar-fs": "^3.0.6", + "unbzip2-stream": "^1.4.3", + "yargs": "^17.7.2" + }, + "bin": { + "browsers": "lib/cjs/main-cli.js" + }, + "engines": 
{ + "node": ">=18" + } + }, + "node_modules/@puppeteer/browsers/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dev": true, + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/@puppeteer/browsers/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, + "node_modules/@puppeteer/browsers/node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@sideway/address": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz", + "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==", + "dev": true, + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==", + "dev": true + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": 
"sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==", + "dev": true + }, + "node_modules/@tootallnate/quickjs-emscripten": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz", + "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==", + "dev": true + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz", + "integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==", + "dev": true + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true + }, + "node_modules/@types/body-parser": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", + "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", + "dev": true, + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.10", + 
"resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz", + "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.35", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", + "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz", + "integrity": "sha512-4x5FkPpLipqwthjPsF7ZRbOv3uoLUFkTA9G9v583qi4pACvq0uTELrB8OLUzPWUI4IJIyvM85vzkV1nyiI2Lig==", + "dev": true, + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/eslint": { + "version": "8.44.2", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.44.2.tgz", + "integrity": "sha512-sdPRb9K6iL5XZOmBubg8yiFp5yS/JdUDQsq5e6h95km91MCYMuvp7mh1fjPEYUhvHepKpZOjnEaMBR4PxjWDzg==", + "dev": true, + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.4.tgz", + "integrity": "sha512-9K4zoImiZc3HlIp6AVUDE4CWYx22a+lhSZMYNpbjW04+YF0KWj4pJXnEMjdnFTiQibFFmElcsasJXDbdI/EPhA==", + "dev": true, + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", + "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==", + 
"dev": true + }, + "node_modules/@types/express": { + "version": "4.17.17", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.17.tgz", + "integrity": "sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==", + "dev": true, + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.17.36", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.36.tgz", + "integrity": "sha512-zbivROJ0ZqLAtMzgzIUC4oNqDG9iF0lSsAqpOD9kbs5xcIM3dTiyuHvBc7R8MtWBp3AAWGaovJa+wzWPjLYW7Q==", + "dev": true, + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==", + "dev": true + }, + "node_modules/@types/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-/K3ds8TRAfBvi5vfjuz8y6+GiAYBZ0x4tXv1Av6CWBWn0IlADc+ZX9pMq7oU0fNQPnBwIZl3rmeLp6SBApbxSQ==", + "dev": true + }, + "node_modules/@types/http-proxy": { + "version": "1.17.11", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.11.tgz", + "integrity": "sha512-HC8G7c1WmaF2ekqpnFq626xd3Zz0uvaqFmBJNRZCGEZCXkvSdJoNFn/8Ygbd9fKNQj8UzLdCETaI0UWPAjK7IA==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.12", + "resolved": 
"https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", + "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==", + "dev": true + }, + "node_modules/@types/mime": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz", + "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==", + "dev": true + }, + "node_modules/@types/node": { + "version": "20.5.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz", + "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==", + "dev": true + }, + "node_modules/@types/qs": { + "version": "6.9.8", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.8.tgz", + "integrity": "sha512-u95svzDlTysU5xecFNTgfFG5RUWu1A9P0VzgpcIiGZA9iraHOdSzcxMxQ55DyeRaGCSxQi7LxXDI4rzq/MYfdg==", "dev": true }, - "node_modules/aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==", - "dev": true, - "engines": { - "node": "*" - } + "node_modules/@types/range-parser": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", + "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==", + "dev": true }, - "node_modules/aws4": { - "version": "1.12.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz", - "integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==", + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": 
"sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", "dev": true }, - "node_modules/axios": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.2.tgz", - "integrity": "sha512-7i24Ri4pmDRfJTR7LDBhsOTtcm+9kjX5WiY1X3wIisx6G9So3pfMkEiU7emUBe46oceVImccTEM3k6C5dbVW8A==", + "node_modules/@types/send": { + "version": "0.17.1", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.1.tgz", + "integrity": "sha512-Cwo8LE/0rnvX7kIIa3QHCkcuF21c05Ayb0ZfxPiv0W8VRiZiNW/WuRupHKpqqGVGf7SUA44QSOUKaEd9lIrd/Q==", "dev": true, "dependencies": { - "follow-redirects": "^1.15.0", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" + "@types/mime": "^1", + "@types/node": "*" } }, - "node_modules/axios/node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "node_modules/@types/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==", "dev": true, "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" + "@types/express": "*" } }, - "node_modules/b4a": { - "version": "1.6.4", - "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.4.tgz", - "integrity": "sha512-fpWrvyVHEKyeEvbKZTVOeZF3VSKKWtJxFIxX/jaVPf+cLbGUSitjb49pHLqPV2BUNNZ0LcoeEGfE/YCpyDYHIw==", - "dev": true - }, - "node_modules/babel-code-frame": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz", - "integrity": 
"sha512-XqYMR2dfdGMW+hd0IUZ2PwK+fGeFkOxZJ0wY+JaQAHzt1Zx8LcvpiZD2NiGkEG8qx0CfkAOr5xt76d1e8vG90g==", + "node_modules/@types/serve-static": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.2.tgz", + "integrity": "sha512-J2LqtvFYCzaj8pVYKw8klQXrLLk7TBZmQ4ShlcdkELFKGwGMfevMLneMMRkMgZxotOD9wg497LpC7O8PcvAmfw==", "dev": true, "dependencies": { - "chalk": "^1.1.3", - "esutils": "^2.0.2", - "js-tokens": "^3.0.2" - } - }, - "node_modules/babel-code-frame/node_modules/ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/babel-code-frame/node_modules/ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha512-kmCevFghRiWM7HB5zTPULl4r9bVFSWjz62MhqizDGUrq2NWuNMQyuv4tHHoKJHs69M/MF64lEcHdYIocrdWQYA==", - "dev": true, - "engines": { - "node": ">=0.10.0" + "@types/http-errors": "*", + "@types/mime": "*", + "@types/node": "*" } }, - "node_modules/babel-code-frame/node_modules/chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==", + "node_modules/@types/sockjs": { + "version": "0.3.33", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz", + "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==", "dev": true, "dependencies": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" + "@types/node": 
"*" } }, - "node_modules/babel-code-frame/node_modules/js-tokens": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz", - "integrity": "sha512-RjTcuD4xjtthQkaWH7dFlH85L+QaVtSoOyGdZ3g6HFhS9dFNDfLyqgm2NFe2X6cQpeFmt0452FJjFG5UameExg==", - "dev": true - }, - "node_modules/babel-code-frame/node_modules/strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha512-VhumSSbBqDTP8p2ZLKj40UjBCV4+v8bUSEpUb4KjRgWk9pbqGF4REFj6KEagidb2f/M6AzC0EmFyDNGaw9OCzg==", + "node_modules/@types/ws": { + "version": "8.5.5", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz", + "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==", "dev": true, "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" + "@types/node": "*" } }, - "node_modules/babel-code-frame/node_modules/supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g==", + "node_modules/@types/yauzl": { + "version": "2.10.3", + "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.3.tgz", + "integrity": "sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==", "dev": true, - "engines": { - "node": ">=0.8.0" + "optional": true, + "dependencies": { + "@types/node": "*" } }, - "node_modules/babel-core": { - "version": "6.26.3", - "resolved": "https://registry.npmjs.org/babel-core/-/babel-core-6.26.3.tgz", - "integrity": "sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==", - "dev": true, - "dependencies": { - "babel-code-frame": "^6.26.0", - "babel-generator": "^6.26.0", - "babel-helpers": 
"^6.24.1", - "babel-messages": "^6.23.0", - "babel-register": "^6.26.0", - "babel-runtime": "^6.26.0", - "babel-template": "^6.26.0", - "babel-traverse": "^6.26.0", - "babel-types": "^6.26.0", - "babylon": "^6.18.0", - "convert-source-map": "^1.5.1", - "debug": "^2.6.9", - "json5": "^0.5.1", - "lodash": "^4.17.4", - "minimatch": "^3.0.4", - "path-is-absolute": "^1.0.1", - "private": "^0.1.8", - "slash": "^1.0.0", - "source-map": "^0.5.7" - } - }, - "node_modules/babel-core/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "node_modules/@webassemblyjs/ast": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz", + "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", "dev": true, "dependencies": { - "ms": "2.0.0" + "@webassemblyjs/helper-numbers": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6" } }, - "node_modules/babel-core/node_modules/json5": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", - "integrity": "sha512-4xrs1aW+6N5DalkqSVA8fxh458CXvR99WU8WLKmq4v8eWAL86Xo3BVqyd3SkA9wEVjCMqyvvRRkshAdOnBp5rw==", - "dev": true, - "bin": { - "json5": "lib/cli.js" - } + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", + "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==", + "dev": true }, - "node_modules/babel-core/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": 
"sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", + "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==", + "dev": true + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz", + "integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==", "dev": true }, - "node_modules/babel-generator": { - "version": "6.26.1", - "resolved": "https://registry.npmjs.org/babel-generator/-/babel-generator-6.26.1.tgz", - "integrity": "sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA==", + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", + "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", "dev": true, "dependencies": { - "babel-messages": "^6.23.0", - "babel-runtime": "^6.26.0", - "babel-types": "^6.26.0", - "detect-indent": "^4.0.0", - "jsesc": "^1.3.0", - "lodash": "^4.17.4", - "source-map": "^0.5.7", - "trim-right": "^1.0.1" + "@webassemblyjs/floating-point-hex-parser": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@xtuc/long": "4.2.2" } }, - "node_modules/babel-generator/node_modules/jsesc": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-1.3.0.tgz", - "integrity": "sha512-Mke0DA0QjUWuJlhsE0ZPPhYiJkRap642SmI/4ztCFaUs6V2AiH1sfecc+57NgaryfAA2VR3v6O+CSjC1jZJKOA==", - "dev": true, - "bin": { - "jsesc": "bin/jsesc" - } + 
"node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", + "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==", + "dev": true }, - "node_modules/babel-helper-call-delegate": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz", - "integrity": "sha512-RL8n2NiEj+kKztlrVJM9JT1cXzzAdvWFh76xh/H1I4nKwunzE4INBXn8ieCZ+wh4zWszZk7NBS1s/8HR5jDkzQ==", + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz", + "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", "dev": true, "dependencies": { - "babel-helper-hoist-variables": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6" } }, - "node_modules/babel-helper-define-map": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-helper-define-map/-/babel-helper-define-map-6.26.0.tgz", - "integrity": "sha512-bHkmjcC9lM1kmZcVpA5t2om2nzT/xiZpo6TJq7UlZ3wqKfzia4veeXbIhKvJXAMzhhEBd3cR1IElL5AenWEUpA==", + "node_modules/@webassemblyjs/ieee754": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", + "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", "dev": true, "dependencies": { - "babel-helper-function-name": "^6.24.1", - "babel-runtime": "^6.26.0", - "babel-types": "^6.26.0", - "lodash": 
"^4.17.4" + "@xtuc/ieee754": "^1.2.0" } }, - "node_modules/babel-helper-function-name": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz", - "integrity": "sha512-Oo6+e2iX+o9eVvJ9Y5eKL5iryeRdsIkwRYheCuhYdVHsdEQysbc2z2QkqCLIYnNxkT5Ss3ggrHdXiDI7Dhrn4Q==", + "node_modules/@webassemblyjs/leb128": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", + "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", "dev": true, "dependencies": { - "babel-helper-get-function-arity": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" + "@xtuc/long": "4.2.2" } }, - "node_modules/babel-helper-get-function-arity": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz", - "integrity": "sha512-WfgKFX6swFB1jS2vo+DwivRN4NB8XUdM3ij0Y1gnC21y1tdBoe6xjVnd7NSI6alv+gZXCtJqvrTeMW3fR/c0ng==", - "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" - } + "node_modules/@webassemblyjs/utf8": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", + "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==", + "dev": true }, - "node_modules/babel-helper-hoist-variables": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz", - "integrity": "sha512-zAYl3tqerLItvG5cKYw7f1SpvIxS9zi7ohyGHaI9cgDUjAT6YcY9jIEH5CstetP5wHIVSceXwNS7Z5BpJg+rOw==", + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.11.6", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz", + "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", "dev": true, "dependencies": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/helper-wasm-section": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-opt": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6", + "@webassemblyjs/wast-printer": "1.11.6" } }, - "node_modules/babel-helper-optimise-call-expression": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz", - "integrity": "sha512-Op9IhEaxhbRT8MDXx2iNuMgciu2V8lDvYCNQbDGjdBNCjaMvyLf4wl4A3b8IgndCyQF8TwfgsQ8T3VD8aX1/pA==", + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz", + "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", "dev": true, "dependencies": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" } }, - "node_modules/babel-helper-regex": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-helper-regex/-/babel-helper-regex-6.26.0.tgz", - "integrity": "sha512-VlPiWmqmGJp0x0oK27Out1D+71nVVCTSdlbhIVoaBAj2lUgrNjBCRR9+llO4lTSb2O4r7PJg+RobRkhBrf6ofg==", + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz", + "integrity": 
"sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", "dev": true, "dependencies": { - "babel-runtime": "^6.26.0", - "babel-types": "^6.26.0", - "lodash": "^4.17.4" + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-buffer": "1.11.6", + "@webassemblyjs/wasm-gen": "1.11.6", + "@webassemblyjs/wasm-parser": "1.11.6" } }, - "node_modules/babel-helper-replace-supers": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz", - "integrity": "sha512-sLI+u7sXJh6+ToqDr57Bv973kCepItDhMou0xCP2YPVmR1jkHSCY+p1no8xErbV1Siz5QE8qKT1WIwybSWlqjw==", + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz", + "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", "dev": true, "dependencies": { - "babel-helper-optimise-call-expression": "^6.24.1", - "babel-messages": "^6.23.0", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" + "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/helper-wasm-bytecode": "1.11.6", + "@webassemblyjs/ieee754": "1.11.6", + "@webassemblyjs/leb128": "1.11.6", + "@webassemblyjs/utf8": "1.11.6" } }, - "node_modules/babel-helpers": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-helpers/-/babel-helpers-6.24.1.tgz", - "integrity": "sha512-n7pFrqQm44TCYvrCDb0MqabAF+JUBq+ijBvNMUxpkLjJaAu32faIexewMumrH5KLLJ1HDyT0PTEqRyAe/GwwuQ==", + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.11.6", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz", + "integrity": 
"sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", "dev": true, "dependencies": { - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" + "@webassemblyjs/ast": "1.11.6", + "@xtuc/long": "4.2.2" } }, - "node_modules/babel-loader": { - "version": "9.1.3", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", - "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", + "node_modules/@webpack-cli/configtest": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@webpack-cli/configtest/-/configtest-2.1.1.tgz", + "integrity": "sha512-wy0mglZpDSiSS0XHrVR+BAdId2+yxPSoJW8fsna3ZpYSlufjvxnP4YbKTCBZnNIcGN4r6ZPXV55X4mYExOfLmw==", "dev": true, - "dependencies": { - "find-cache-dir": "^4.0.0", - "schema-utils": "^4.0.0" - }, "engines": { - "node": ">= 14.15.0" + "node": ">=14.15.0" }, "peerDependencies": { - "@babel/core": "^7.12.0", - "webpack": ">=5" - } - }, - "node_modules/babel-messages": { - "version": "6.23.0", - "resolved": "https://registry.npmjs.org/babel-messages/-/babel-messages-6.23.0.tgz", - "integrity": "sha512-Bl3ZiA+LjqaMtNYopA9TYE9HP1tQ+E5dLxE0XrAzcIJeK2UqF0/EaqXwBn9esd4UmTfEab+P+UYQ1GnioFIb/w==", - "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0" - } - }, - "node_modules/babel-plugin-check-es2015-constants": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz", - "integrity": "sha512-B1M5KBP29248dViEo1owyY32lk1ZSH2DaNNrXLGt8lyjjHm7pBqAdQ7VKUPR6EEDO323+OvT3MQXbCin8ooWdA==", - "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0" - } - }, - "node_modules/babel-plugin-transform-es2015-arrow-functions": { - "version": "6.22.0", - "resolved": 
"https://registry.npmjs.org/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz", - "integrity": "sha512-PCqwwzODXW7JMrzu+yZIaYbPQSKjDTAsNNlK2l5Gg9g4rz2VzLnZsStvp/3c46GfXpwkyufb3NCyG9+50FF1Vg==", - "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0" + "webpack": "5.x.x", + "webpack-cli": "5.x.x" } }, - "node_modules/babel-plugin-transform-es2015-block-scoped-functions": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz", - "integrity": "sha512-2+ujAT2UMBzYFm7tidUsYh+ZoIutxJ3pN9IYrF1/H6dCKtECfhmB8UkHVpyxDwkj0CYbQG35ykoz925TUnBc3A==", + "node_modules/@webpack-cli/info": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@webpack-cli/info/-/info-2.0.2.tgz", + "integrity": "sha512-zLHQdI/Qs1UyT5UBdWNqsARasIA+AaF8t+4u2aS2nEpBQh2mWIVb8qAklq0eUENnC5mOItrIB4LiS9xMtph18A==", "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0" + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" } }, - "node_modules/babel-plugin-transform-es2015-block-scoping": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.26.0.tgz", - "integrity": "sha512-YiN6sFAQ5lML8JjCmr7uerS5Yc/EMbgg9G8ZNmk2E3nYX4ckHR01wrkeeMijEf5WHNK5TW0Sl0Uu3pv3EdOJWw==", + "node_modules/@webpack-cli/serve": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@webpack-cli/serve/-/serve-2.0.5.tgz", + "integrity": "sha512-lqaoKnRYBdo1UgDX8uF24AfGMifWK19TxPmM5FHc2vAGxrJ/qtyUyFBWoY1tISZdelsQ5fBcOusifo5o5wSJxQ==", "dev": true, - "dependencies": { - "babel-runtime": "^6.26.0", - "babel-template": "^6.26.0", - "babel-traverse": "^6.26.0", - "babel-types": "^6.26.0", - "lodash": "^4.17.4" + 
"engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + }, + "peerDependenciesMeta": { + "webpack-dev-server": { + "optional": true + } } }, - "node_modules/babel-plugin-transform-es2015-classes": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz", - "integrity": "sha512-5Dy7ZbRinGrNtmWpquZKZ3EGY8sDgIVB4CU8Om8q8tnMLrD/m94cKglVcHps0BCTdZ0TJeeAWOq2TK9MIY6cag==", - "dev": true, - "dependencies": { - "babel-helper-define-map": "^6.24.1", - "babel-helper-function-name": "^6.24.1", - "babel-helper-optimise-call-expression": "^6.24.1", - "babel-helper-replace-supers": "^6.24.1", - "babel-messages": "^6.23.0", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" - } + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "dev": true }, - "node_modules/babel-plugin-transform-es2015-computed-properties": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz", - "integrity": "sha512-C/uAv4ktFP/Hmh01gMTvYvICrKze0XVX9f2PdIXuriCSvUmV9j+u+BB9f5fJK3+878yMK6dkdcq+Ymr9mrcLzw==", - "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" - } + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "dev": true }, - "node_modules/babel-plugin-transform-es2015-destructuring": { - "version": 
"6.23.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz", - "integrity": "sha512-aNv/GDAW0j/f4Uy1OEPZn1mqD+Nfy9viFGBfQ5bZyT35YqOiqx7/tXdyfZkJ1sC21NyEsBdfDY6PYmLHF4r5iA==", + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", "dev": true, "dependencies": { - "babel-runtime": "^6.22.0" + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" } }, - "node_modules/babel-plugin-transform-es2015-duplicate-keys": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz", - "integrity": "sha512-ossocTuPOssfxO2h+Z3/Ea1Vo1wWx31Uqy9vIiJusOP4TbF7tPs9U0sJ9pX9OJPf4lXRGj5+6Gkl/HHKiAP5ug==", + "node_modules/acorn": { + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" } }, - "node_modules/babel-plugin-transform-es2015-for-of": { - "version": "6.23.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz", - "integrity": "sha512-DLuRwoygCoXx+YfxHLkVx5/NpeSbVwfoTeBykpJK7JhYWlL/O8hgAK/reforUnZDlxasOrVPPJVI/guE3dCwkw==", + "node_modules/acorn-import-assertions": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", + "integrity": 
"sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0" + "peerDependencies": { + "acorn": "^8" } }, - "node_modules/babel-plugin-transform-es2015-function-name": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz", - "integrity": "sha512-iFp5KIcorf11iBqu/y/a7DK3MN5di3pNCzto61FqCNnUX4qeBwcV1SLqe10oXNnCaxBUImX3SckX2/o1nsrTcg==", + "node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", "dev": true, - "dependencies": { - "babel-helper-function-name": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "engines": { + "node": ">=0.4.0" } }, - "node_modules/babel-plugin-transform-es2015-literals": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz", - "integrity": "sha512-tjFl0cwMPpDYyoqYA9li1/7mGFit39XiNX5DKC/uCNjBctMxyL1/PT/l4rSlbvBG1pOKI88STRdUsWXB3/Q9hQ==", + "node_modules/agent-base": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.1.tgz", + "integrity": "sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==", "dev": true, "dependencies": { - "babel-runtime": "^6.22.0" + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" } }, - "node_modules/babel-plugin-transform-es2015-modules-amd": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz", - "integrity": 
"sha512-LnIIdGWIKdw7zwckqx+eGjcS8/cl8D74A3BpJbGjKTFFNJSMrjN4bIh22HY1AlkUbeLG6X6OZj56BDvWD+OeFA==", + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "dependencies": { - "babel-plugin-transform-es2015-modules-commonjs": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/babel-plugin-transform-es2015-modules-commonjs": { - "version": "6.26.2", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz", - "integrity": "sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q==", + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", "dev": true, "dependencies": { - "babel-plugin-transform-strict-mode": "^6.24.1", - "babel-runtime": "^6.26.0", - "babel-template": "^6.26.0", - "babel-types": "^6.26.0" + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } } }, - "node_modules/babel-plugin-transform-es2015-modules-systemjs": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz", - "integrity": 
"sha512-ONFIPsq8y4bls5PPsAWYXH/21Hqv64TBxdje0FvU3MhIV6QM2j5YS7KvAzg/nTIVLot2D2fmFQrFWCbgHlFEjg==", + "node_modules/ajv-formats/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", "dev": true, "dependencies": { - "babel-helper-hoist-variables": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/babel-plugin-transform-es2015-modules-umd": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz", - "integrity": "sha512-LpVbiT9CLsuAIp3IG0tfbVo81QIhn6pE8xBJ7XSeCtFlMltuar5VuBV6y6Q45tpui9QWcy5i0vLQfCfrnF7Kiw==", - "dev": true, - "dependencies": { - "babel-plugin-transform-es2015-modules-amd": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1" - } + "node_modules/ajv-formats/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true }, - "node_modules/babel-plugin-transform-es2015-object-super": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz", - "integrity": "sha512-8G5hpZMecb53vpD3mjs64NhI1au24TAmokQ4B+TBFBjN9cVoGoOvotdrMMRmHvVZUEvqGUPWL514woru1ChZMA==", + "node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", "dev": true, - "dependencies": { - "babel-helper-replace-supers": "^6.24.1", - "babel-runtime": "^6.22.0" + "peerDependencies": { + "ajv": "^6.9.1" } }, - "node_modules/babel-plugin-transform-es2015-parameters": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz", - "integrity": "sha512-8HxlW+BB5HqniD+nLkQ4xSAVq3bR/pcYW9IigY+2y0dI+Y7INFeTbfAQr+63T3E4UDsZGjyb+l9txUnABWxlOQ==", + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", "dev": true, - "dependencies": { - "babel-helper-call-delegate": "^6.24.1", - "babel-helper-get-function-arity": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-template": "^6.24.1", - "babel-traverse": "^6.24.1", - "babel-types": "^6.24.1" + "engines": [ + "node >= 0.8.0" + ], + "bin": { + "ansi-html": "bin/ansi-html" } }, - "node_modules/babel-plugin-transform-es2015-shorthand-properties": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz", - "integrity": "sha512-mDdocSfUVm1/7Jw/FIRNw9vPrBQNePy6wZJlR8HAUBLybNp1w/6lr6zZ2pjMShee65t/ybR5pT8ulkLzD1xwiw==", + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0", - 
"babel-types": "^6.24.1" + "engines": { + "node": ">=8" } }, - "node_modules/babel-plugin-transform-es2015-spread": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz", - "integrity": "sha512-3Ghhi26r4l3d0Js933E5+IhHwk0A1yiutj9gwvzmFbVV0sPMYk2lekhOufHBswX7NCoSeF4Xrl3sCIuSIa+zOg==", + "node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", "dev": true, "dependencies": { - "babel-runtime": "^6.22.0" + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" } }, - "node_modules/babel-plugin-transform-es2015-sticky-regex": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz", - "integrity": "sha512-CYP359ADryTo3pCsH0oxRo/0yn6UsEZLqYohHmvLQdfS9xkf+MbCzE3/Kolw9OYIY4ZMilH25z/5CbQbwDD+lQ==", + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", "dev": true, "dependencies": { - "babel-helper-regex": "^6.24.1", - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" } }, - "node_modules/babel-plugin-transform-es2015-template-literals": { - "version": "6.22.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz", - "integrity": "sha512-x8b9W0ngnKzDMHimVtTfn5ryimars1ByTqsfBDwAqLibmuuQY6pgBQi5z1ErIsUOWBdw1bW9FSz5RZUojM4apg==", - "dev": true, - "dependencies": 
{ - "babel-runtime": "^6.22.0" - } + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true }, - "node_modules/babel-plugin-transform-es2015-typeof-symbol": { - "version": "6.23.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz", - "integrity": "sha512-fz6J2Sf4gYN6gWgRZaoFXmq93X+Li/8vf+fb0sGDVtdeWvxC9y5/bTD7bvfWMEq6zetGEHpWjtzRGSugt5kNqw==", - "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0" - } + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true }, - "node_modules/babel-plugin-transform-es2015-unicode-regex": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz", - "integrity": "sha512-v61Dbbihf5XxnYjtBN04B/JBvsScY37R1cZT5r9permN1cp+b70DY3Ib3fIkgn1DI9U3tGgBJZVD8p/mE/4JbQ==", - "dev": true, - "dependencies": { - "babel-helper-regex": "^6.24.1", - "babel-runtime": "^6.22.0", - "regexpu-core": "^2.0.0" - } + "node_modules/array-flatten": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", + "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==", + "dev": true }, - "node_modules/babel-plugin-transform-regenerator": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.26.0.tgz", - "integrity": 
"sha512-LS+dBkUGlNR15/5WHKe/8Neawx663qttS6AGqoOUhICc9d1KciBvtrQSuc0PI+CxQ2Q/S1aKuJ+u64GtLdcEZg==", + "node_modules/ast-types": { + "version": "0.13.4", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.13.4.tgz", + "integrity": "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==", "dev": true, "dependencies": { - "regenerator-transform": "^0.10.0" + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" } }, - "node_modules/babel-plugin-transform-strict-mode": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz", - "integrity": "sha512-j3KtSpjyLSJxNoCDrhwiJad8kw0gJ9REGj8/CqL0HeRyLnvUNYV9zcqluL6QJSXh3nfsLEmSLvwRfGzrgR96Pw==", - "dev": true, - "dependencies": { - "babel-runtime": "^6.22.0", - "babel-types": "^6.24.1" - } + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "dev": true }, - "node_modules/babel-preset-es2015": { - "version": "6.24.1", - "resolved": "https://registry.npmjs.org/babel-preset-es2015/-/babel-preset-es2015-6.24.1.tgz", - "integrity": "sha512-XfwUqG1Ry6R43m4Wfob+vHbIVBIqTg/TJY4Snku1iIzeH7mUnwHA8Vagmv+ZQbPwhS8HgsdQvy28Py3k5zpoFQ==", - "deprecated": "🙌 Thanks for using Babel: we recommend using babel-preset-env now: please read https://babeljs.io/env to update!", + "node_modules/axios": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.2.tgz", + "integrity": "sha512-7i24Ri4pmDRfJTR7LDBhsOTtcm+9kjX5WiY1X3wIisx6G9So3pfMkEiU7emUBe46oceVImccTEM3k6C5dbVW8A==", "dev": true, "dependencies": { - "babel-plugin-check-es2015-constants": "^6.22.0", - "babel-plugin-transform-es2015-arrow-functions": "^6.22.0", - 
"babel-plugin-transform-es2015-block-scoped-functions": "^6.22.0", - "babel-plugin-transform-es2015-block-scoping": "^6.24.1", - "babel-plugin-transform-es2015-classes": "^6.24.1", - "babel-plugin-transform-es2015-computed-properties": "^6.24.1", - "babel-plugin-transform-es2015-destructuring": "^6.22.0", - "babel-plugin-transform-es2015-duplicate-keys": "^6.24.1", - "babel-plugin-transform-es2015-for-of": "^6.22.0", - "babel-plugin-transform-es2015-function-name": "^6.24.1", - "babel-plugin-transform-es2015-literals": "^6.22.0", - "babel-plugin-transform-es2015-modules-amd": "^6.24.1", - "babel-plugin-transform-es2015-modules-commonjs": "^6.24.1", - "babel-plugin-transform-es2015-modules-systemjs": "^6.24.1", - "babel-plugin-transform-es2015-modules-umd": "^6.24.1", - "babel-plugin-transform-es2015-object-super": "^6.24.1", - "babel-plugin-transform-es2015-parameters": "^6.24.1", - "babel-plugin-transform-es2015-shorthand-properties": "^6.24.1", - "babel-plugin-transform-es2015-spread": "^6.22.0", - "babel-plugin-transform-es2015-sticky-regex": "^6.24.1", - "babel-plugin-transform-es2015-template-literals": "^6.22.0", - "babel-plugin-transform-es2015-typeof-symbol": "^6.22.0", - "babel-plugin-transform-es2015-unicode-regex": "^6.24.1", - "babel-plugin-transform-regenerator": "^6.24.1" + "follow-redirects": "^1.15.0", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" } }, - "node_modules/babel-register": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-register/-/babel-register-6.26.0.tgz", - "integrity": "sha512-veliHlHX06wjaeY8xNITbveXSiI+ASFnOqvne/LaIJIqOWi2Ogmj91KOugEz/hoh/fwMhXNBJPCv8Xaz5CyM4A==", + "node_modules/axios/node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", "dev": true, "dependencies": { - "babel-core": "^6.26.0", - 
"babel-runtime": "^6.26.0", - "core-js": "^2.5.0", - "home-or-tmp": "^2.0.0", - "lodash": "^4.17.4", - "mkdirp": "^0.5.1", - "source-map-support": "^0.4.15" + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" } }, - "node_modules/babel-runtime": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", - "integrity": "sha512-ITKNuq2wKlW1fJg9sSW52eepoYgZBggvOAHC0u/CYu/qxQ9EVzThCgR69BnSXLHjy2f7SY5zaQ4yt7H9ZVxY2g==", - "dev": true, - "dependencies": { - "core-js": "^2.4.0", - "regenerator-runtime": "^0.11.0" - } + "node_modules/b4a": { + "version": "1.6.6", + "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.6.tgz", + "integrity": "sha512-5Tk1HLk6b6ctmjIkAcU/Ujv/1WqiDl0F0JdRCR80VsOcUlHcu7pWeWRlOqQLHfDEsVx9YH/aif5AG4ehoCtTmg==", + "dev": true }, - "node_modules/babel-template": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-template/-/babel-template-6.26.0.tgz", - "integrity": "sha512-PCOcLFW7/eazGUKIoqH97sO9A2UYMahsn/yRQ7uOk37iutwjq7ODtcTNF+iFDSHNfkctqsLRjLP7URnOx0T1fg==", + "node_modules/babel-loader": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", + "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", "dev": true, "dependencies": { - "babel-runtime": "^6.26.0", - "babel-traverse": "^6.26.0", - "babel-types": "^6.26.0", - "babylon": "^6.18.0", - "lodash": "^4.17.4" + "find-cache-dir": "^4.0.0", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5" } }, - "node_modules/babel-traverse": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-traverse/-/babel-traverse-6.26.0.tgz", - "integrity": 
"sha512-iSxeXx7apsjCHe9c7n8VtRXGzI2Bk1rBSOJgCCjfyXb6v1aCqE1KSEpq/8SXuVN8Ka/Rh1WDTF0MDzkvTA4MIA==", + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.11", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz", + "integrity": "sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==", "dev": true, "dependencies": { - "babel-code-frame": "^6.26.0", - "babel-messages": "^6.23.0", - "babel-runtime": "^6.26.0", - "babel-types": "^6.26.0", - "babylon": "^6.18.0", - "debug": "^2.6.8", - "globals": "^9.18.0", - "invariant": "^2.2.2", - "lodash": "^4.17.4" + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.6.2", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, - "node_modules/babel-traverse/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.10.6", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.6.tgz", + "integrity": "sha512-b37+KR2i/khY5sKmWNVQAnitvquQbNdWy6lJdsr0kmquCKEEUgMKK4SboVM3HtfnZilfjr4MMQ7vY58FVWDtIA==", "dev": true, "dependencies": { - "ms": "2.0.0" + "@babel/helper-define-polyfill-provider": "^0.6.2", + "core-js-compat": "^3.38.0" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, - "node_modules/babel-traverse/node_modules/globals": { - "version": "9.18.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-9.18.0.tgz", - "integrity": "sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==", + "node_modules/babel-plugin-polyfill-regenerator": { + 
"version": "0.6.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz", + "integrity": "sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==", "dev": true, - "engines": { - "node": ">=0.10.0" + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, - "node_modules/babel-traverse/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, - "node_modules/babel-types": { - "version": "6.26.0", - "resolved": "https://registry.npmjs.org/babel-types/-/babel-types-6.26.0.tgz", - "integrity": "sha512-zhe3V/26rCWsEZK8kZN+HaQj5yQ1CilTObixFzKW1UWjqG7618Twz6YEsCnjfg5gBcJh02DrpCkS9h98ZqDY+g==", + "node_modules/bare-events": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.4.2.tgz", + "integrity": "sha512-qMKFd2qG/36aA4GwvKq8MxnPgCQAmBWmSyLWsJcbn8v03wvIPQ/hG1Ms8bPzndZxMDoHpxez5VOS+gC9Yi24/Q==", + "dev": true, + "optional": true + }, + "node_modules/bare-fs": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-2.3.5.tgz", + "integrity": "sha512-SlE9eTxifPDJrT6YgemQ1WGFleevzwY+XAP1Xqgl56HtcrisC2CHCZ2tq6dBpcH2TnNxwUEUGhweo+lrQtYuiw==", "dev": true, + "optional": true, "dependencies": { - "babel-runtime": "^6.26.0", - "esutils": "^2.0.2", - "lodash": "^4.17.4", - "to-fast-properties": "^1.0.3" + "bare-events": "^2.0.0", + 
"bare-path": "^2.0.0", + "bare-stream": "^2.0.0" } }, - "node_modules/babel-types/node_modules/to-fast-properties": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz", - "integrity": "sha512-lxrWP8ejsq+7E3nNjwYmUBMAgjMTZoTI+sdBOpvNyijeDLa29LUn9QaoXAHv4+Z578hbmHHJKZknzxVtvo77og==", + "node_modules/bare-os": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/bare-os/-/bare-os-2.4.4.tgz", + "integrity": "sha512-z3UiI2yi1mK0sXeRdc4O1Kk8aOa/e+FNWZcTiPB/dfTWyLypuE99LibgRaQki914Jq//yAWylcAt+mknKdixRQ==", "dev": true, - "engines": { - "node": ">=0.10.0" - } + "optional": true }, - "node_modules/babylon": { - "version": "6.18.0", - "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", - "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", + "node_modules/bare-path": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/bare-path/-/bare-path-2.1.3.tgz", + "integrity": "sha512-lh/eITfU8hrj9Ru5quUp0Io1kJWIk1bTjzo7JH1P5dWmQ2EL4hFUlfI8FonAhSlgIfhn63p84CDY/x+PisgcXA==", "dev": true, - "bin": { - "babylon": "bin/babylon.js" + "optional": true, + "dependencies": { + "bare-os": "^2.1.0" } }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true + "node_modules/bare-stream": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/bare-stream/-/bare-stream-2.3.0.tgz", + "integrity": "sha512-pVRWciewGUeCyKEuRxwv06M079r+fRjAQjBEK2P6OYGrO43O+Z0LrPZZEjlc4mB6C2RpZ9AxJ1s7NLEtOHO6eA==", + "dev": true, + "optional": true, + "dependencies": { + "b4a": "^1.6.6", + "streamx": "^2.20.0" + } }, "node_modules/base64-js": { "version": "1.5.1", @@ -1911,9 
+2801,9 @@ ] }, "node_modules/basic-ftp": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.0.3.tgz", - "integrity": "sha512-QHX8HLlncOLpy54mh+k/sWIFd0ThmRqwe9ZjELybGZK+tZ8rUb9VO0saKJUROTbE+KhzDUT7xziGpGrW8Kmd+g==", + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.0.5.tgz", + "integrity": "sha512-4Bcg1P8xhUuqcii/S0Z9wiHIrQVPMermM1any+MX5GeGD7faD3/msQUDGLol9wOcz4/jbg/WJnGqoJF6LiBdtg==", "dev": true, "engines": { "node": ">=10.0.0" @@ -1925,15 +2815,6 @@ "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==", "dev": true }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", - "dev": true, - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, "node_modules/binary-extensions": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", @@ -2053,9 +2934,9 @@ } }, "node_modules/browserslist": { - "version": "4.21.10", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz", - "integrity": "sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==", + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", + "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", "dev": true, "funding": [ { @@ -2072,10 +2953,10 @@ } ], "dependencies": { - "caniuse-lite": "^1.0.30001517", - "electron-to-chromium": "^1.4.477", - "node-releases": "^2.0.13", - "update-browserslist-db": "^1.0.11" + "caniuse-lite": "^1.0.30001646", + "electron-to-chromium": "^1.5.4", + "node-releases": "^2.0.18", 
+ "update-browserslist-db": "^1.1.0" }, "bin": { "browserslist": "cli.js" @@ -2165,9 +3046,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001524", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001524.tgz", - "integrity": "sha512-Jj917pJtYg9HSJBF95HVX3Cdr89JUyLT4IZ8SvM5aDRni95swKgYi3TgYLH5hnGfPE/U1dg6IfZ50UsIlLkwSA==", + "version": "1.0.30001663", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001663.tgz", + "integrity": "sha512-o9C3X27GLKbLeTYZ6HBOLU1tsAcBZsLis28wrVzddShCS16RujjHp9GDHKZqrB3meE0YjhawvMFsGb/igqiPzA==", "dev": true, "funding": [ { @@ -2184,12 +3065,6 @@ } ] }, - "node_modules/caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==", - "dev": true - }, "node_modules/chalk": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", @@ -2250,12 +3125,14 @@ } }, "node_modules/chromium-bidi": { - "version": "0.4.16", - "resolved": "https://registry.npmjs.org/chromium-bidi/-/chromium-bidi-0.4.16.tgz", - "integrity": "sha512-7ZbXdWERxRxSwo3txsBjjmc/NLxqb1Bk30mRb0BMS4YIaiV6zvKZqL/UAH+DdqcDYayDWk2n/y8klkBDODrPvA==", + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/chromium-bidi/-/chromium-bidi-0.6.5.tgz", + "integrity": "sha512-RuLrmzYrxSb0s9SgpB+QN5jJucPduZQ/9SIe76MDxYJuecPW5mxMdacJ1f4EtgiV+R0p3sCkznTMvH0MPGFqjA==", "dev": true, "dependencies": { - "mitt": "3.0.0" + "mitt": "3.0.1", + "urlpattern-polyfill": "10.0.0", + "zod": "3.23.8" }, "peerDependencies": { "devtools-protocol": "*" @@ -2273,15 +3150,6 @@ "node": ">= 10.0" } }, - "node_modules/clean-css/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": 
"sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/cliui": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", @@ -2466,19 +3334,18 @@ "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", "dev": true }, - "node_modules/cookiejar": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.1.4.tgz", - "integrity": "sha512-LDx6oHrK+PhzLKJU9j5S7/Y3jM/mUHvD/DeI1WQmJn652iPC5Y4TBzC9l+5OMOXlyTTA+SmVUPm0HQUwpD5Jqw==", - "dev": true - }, - "node_modules/core-js": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.12.tgz", - "integrity": "sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ==", - "deprecated": "core-js@<3.23.3 is no longer maintained and not recommended for usage due to the number of issues. Because of the V8 engine whims, feature detection in old core-js versions could cause a slowdown up to 100x even if nothing is polyfilled. Some versions have web compatibility issues. 
Please, upgrade your dependencies to the actual version of core-js.", + "node_modules/core-js-compat": { + "version": "3.38.1", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.38.1.tgz", + "integrity": "sha512-JRH6gfXxGmrzF3tZ57lFx97YARxCXPaMzPo6jELZhv88pBH5VXpQ+y0znKGlFnzuaihqhLbefxSJxWJMPtfDzw==", "dev": true, - "hasInstallScript": true + "dependencies": { + "browserslist": "^4.23.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } }, "node_modules/core-util-is": { "version": "1.0.2", @@ -2486,39 +3353,12 @@ "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==", "dev": true }, - "node_modules/cosmiconfig": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz", - "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==", - "dev": true, - "dependencies": { - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "parse-json": "^5.0.0", - "path-type": "^4.0.0" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/d-fischer" - } - }, "node_modules/create-require": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", "dev": true }, - "node_modules/cross-fetch": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-4.0.0.tgz", - "integrity": "sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g==", - "dev": true, - "dependencies": { - "node-fetch": "^2.6.12" - } - }, "node_modules/cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", @@ 
-2561,22 +3401,10 @@ "url": "https://github.com/sponsors/fb55" } }, - "node_modules/dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", - "dev": true, - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, "node_modules/data-uri-to-buffer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-5.0.1.tgz", - "integrity": "sha512-a9l6T1qqDogvvnw0nKlfZzqsyikEBZBClF39V3TFoKhDtGBqHu2HkuomJc02j5zft8zrUaXEuoicLeW54RkzPg==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-6.0.2.tgz", + "integrity": "sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw==", "dev": true, "engines": { "node": ">= 14" @@ -2662,18 +3490,6 @@ "npm": "1.2.8000 || >= 1.4.16" } }, - "node_modules/detect-indent": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-4.0.0.tgz", - "integrity": "sha512-BDKtmHlOzwI7iRuEkhzsnPoi5ypEhWAJB5RvHWe1kMr06js3uK5B3734i3ui5Yd+wOJV1cpE4JnivPD283GU/A==", - "dev": true, - "dependencies": { - "repeating": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/detect-node": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", @@ -2681,9 +3497,9 @@ "dev": true }, "node_modules/devtools-protocol": { - "version": "0.0.1147663", - "resolved": "https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1147663.tgz", - "integrity": "sha512-hyWmRrexdhbZ1tcJUGpO95ivbRhWXz++F4Ko+n21AY5PNln2ovoJw+8ZMNDTtip+CNFQfrtLVh/w4009dXO/eQ==", + "version": "0.0.1342118", + "resolved": 
"https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1342118.tgz", + "integrity": "sha512-75fMas7PkYNDTmDyb6PRJCH7ILmHLp+BhrZGeMsa4bCh40DTxgCz2NRy5UDzII4C5KuD0oBMZ9vXKhEl6UD/3w==", "dev": true }, "node_modules/diff": { @@ -2793,16 +3609,6 @@ "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", "dev": true }, - "node_modules/ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", - "dev": true, - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, "node_modules/ee-first": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", @@ -2810,9 +3616,9 @@ "dev": true }, "node_modules/electron-to-chromium": { - "version": "1.4.506", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.506.tgz", - "integrity": "sha512-xxGct4GPAKSRlrLBtJxJFYy74W11zX6PO9GyHgl/U+2s3Dp0ZEwAklDfNHXOWcvH7zWMpsmgbR0ggEuaYAVvHA==", + "version": "1.5.27", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.27.tgz", + "integrity": "sha512-o37j1vZqCoEgBuWWXLHQgTN/KDKe7zwpiY5CPeq2RvUqOyJw9xnrULzZAEVQ5p4h+zjMk7hgtOoPdnLxr7m/jw==", "dev": true }, "node_modules/emoji-regex": { @@ -2861,6 +3667,15 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/envinfo": { "version": "7.10.0", "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.10.0.tgz", @@ 
-2889,9 +3704,9 @@ "dev": true }, "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "dev": true, "engines": { "node": ">=6" @@ -2933,16 +3748,6 @@ "source-map": "~0.6.1" } }, - "node_modules/escodegen/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "optional": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/eslint-scope": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", @@ -3160,12 +3965,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "dev": true - }, "node_modules/extract-zip": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", @@ -3186,15 +3985,6 @@ "@types/yauzl": "^2.9.1" } }, - "node_modules/extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==", - "dev": true, - "engines": [ - "node >=0.6.0" - ] - }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": 
"https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -3222,19 +4012,6 @@ "node": ">= 4.9.1" } }, - "node_modules/faye": { - "version": "0.8.11", - "resolved": "https://registry.npmjs.org/faye/-/faye-0.8.11.tgz", - "integrity": "sha512-d2SXlWy+wR8D2AgYjCnJrA8v4RvwKeRQeTB2aLUetyhrNKTU28mAvSMezSZDNyOONVrsF0IY1s4625QgggM2XA==", - "dev": true, - "dependencies": { - "cookiejar": "", - "faye-websocket": ">=0.4.0" - }, - "engines": { - "node": ">=0.1.96" - } - }, "node_modules/faye-websocket": { "version": "0.11.4", "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", @@ -3353,43 +4130,6 @@ } } }, - "node_modules/force": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/force/-/force-0.0.3.tgz", - "integrity": "sha512-B/4gl3/7o8Q4jYfXNKSvTHlAPxB1ruYCkxVkiVUUuHziYbDa2NsURljSgpm+Q+d4cGmN1EaAD5QXhLodGN44zA==", - "dev": true, - "dependencies": { - "faye": "~0.8.3", - "mime": "~1.2.9", - "request": "*" - }, - "engines": { - "node": "*" - } - }, - "node_modules/forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dev": true, - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -3415,17 +4155,17 @@ "dev": true }, "node_modules/fs-extra": { - "version": 
"8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", + "version": "11.2.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", + "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", "dev": true, "dependencies": { "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, "engines": { - "node": ">=6 <7 || >=8" + "node": ">=14.14" } }, "node_modules/fs-monkey": { @@ -3465,7 +4205,6 @@ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", "dev": true, - "peer": true, "engines": { "node": ">=6.9.0" } @@ -3510,29 +4249,20 @@ } }, "node_modules/get-uri": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.1.tgz", - "integrity": "sha512-7ZqONUVqaabogsYNWlYj0t3YZaL6dhuEueZXGF+/YVmf6dHmaFg8/6psJKqhx9QykIDKzpGcy2cn4oV4YC7V/Q==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.3.tgz", + "integrity": "sha512-BzUrJBS9EcUb4cFol8r4W3v1cPsSyajLSthNkz5BxbpDcHN5tIrM10E2eNvfnvBn3DaT3DUgx0OpsBKkaOpanw==", "dev": true, "dependencies": { "basic-ftp": "^5.0.2", - "data-uri-to-buffer": "^5.0.1", + "data-uri-to-buffer": "^6.0.2", "debug": "^4.3.4", - "fs-extra": "^8.1.0" + "fs-extra": "^11.2.0" }, "engines": { "node": ">= 14" } }, - "node_modules/getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", - "dev": true, - "dependencies": { - "assert-plus": "^1.0.0" - } - }, 
"node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", @@ -3576,7 +4306,6 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true, - "peer": true, "engines": { "node": ">=4" } @@ -3593,29 +4322,6 @@ "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==", "dev": true }, - "node_modules/har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "deprecated": "this library is no longer supported", - "dev": true, - "dependencies": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/has": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", @@ -3628,27 +4334,6 @@ "node": ">= 0.4.0" } }, - "node_modules/has-ansi": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha512-C8vBJ8DwUCx19vhm7urhTuUsr4/IyP6l4VzNQDv+ryHQObW3TTTp9yB68WpYgRe2bbaGuZ/se74IqFeVnMnLZg==", - "dev": true, - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-ansi/node_modules/ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": 
"sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", @@ -3691,19 +4376,6 @@ "he": "bin/he" } }, - "node_modules/home-or-tmp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/home-or-tmp/-/home-or-tmp-2.0.0.tgz", - "integrity": "sha512-ycURW7oUxE2sNiPVw1HVEFsW+ecOpJ5zaj7eC0RlwhibhRBod20muUN8qu/gzx956YrLolVvs1MTXwKgC2rVEg==", - "dev": true, - "dependencies": { - "os-homedir": "^1.0.0", - "os-tmpdir": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/hpack.js": { "version": "2.1.6", "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", @@ -3868,9 +4540,9 @@ } }, "node_modules/http-proxy-agent": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.0.tgz", - "integrity": "sha512-+ZT+iBxVUQ1asugqnD6oWoRiS25AkjNfG085dKJGtGxkdwLQrMKU5wJr2bOOFAXzKcTuqq+7fZlTMgG3SRfIYQ==", + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", "dev": true, "dependencies": { "agent-base": "^7.1.0", @@ -3904,25 +4576,10 @@ } } }, - "node_modules/http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==", - "dev": true, - "dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - }, - "engines": { - "node": ">=0.8", - "npm": ">=1.3.7" - } - }, "node_modules/https-proxy-agent": { - "version": "7.0.1", - "resolved": 
"https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.1.tgz", - "integrity": "sha512-Eun8zV0kcYS1g19r78osiQLEFIRspRUDd9tIfBCTBPBeMieF/EsJNL8VI3xOIdYRDEkjQnqOYPsZ2DsWsVsFwQ==", + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.5.tgz", + "integrity": "sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==", "dev": true, "dependencies": { "agent-base": "^7.0.2", @@ -4106,21 +4763,19 @@ "node": ">=10.13.0" } }, - "node_modules/invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "node_modules/ip-address": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-9.0.5.tgz", + "integrity": "sha512-zHtQzGojZXTwZTHQqra+ETKd4Sn3vgi7uBmlPoXVWZqYvuKmtI0l/VZTjqGmJY9x88GGOaZ9+G9ES8hC4T4X8g==", "dev": true, "dependencies": { - "loose-envify": "^1.0.0" + "jsbn": "1.1.0", + "sprintf-js": "^1.1.3" + }, + "engines": { + "node": ">= 12" } }, - "node_modules/ip": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.8.tgz", - "integrity": "sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==", - "dev": true - }, "node_modules/ipaddr.js": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz", @@ -4184,18 +4839,6 @@ "node": ">=0.10.0" } }, - "node_modules/is-finite": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.1.0.tgz", - "integrity": "sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==", - "dev": true, - "engines": { - "node": ">=0.10.0" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -4262,12 +4905,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", - "dev": true - }, "node_modules/is-wsl": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", @@ -4301,12 +4938,6 @@ "node": ">=0.10.0" } }, - "node_modules/isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==", - "dev": true - }, "node_modules/jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", @@ -4377,9 +5008,9 @@ } }, "node_modules/jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==", "dev": true }, "node_modules/jsesc": { @@ -4387,7 +5018,6 @@ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", "dev": true, - "peer": true, "bin": { "jsesc": "bin/jsesc" }, @@ -4401,30 +5031,17 @@ "integrity": 
"sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", "dev": true }, - "node_modules/json-schema": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", - "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==", - "dev": true - }, "node_modules/json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==", - "dev": true - }, "node_modules/json5": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "dev": true, - "peer": true, "bin": { "json5": "lib/cli.js" }, @@ -4433,27 +5050,15 @@ } }, "node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", - "dev": true, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsprim": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", - "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": 
"sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", "dev": true, "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.4.0", - "verror": "1.10.0" + "universalify": "^2.0.0" }, - "engines": { - "node": ">=0.6.0" + "optionalDependencies": { + "graceful-fs": "^4.1.6" } }, "node_modules/kind-of": { @@ -4520,17 +5125,11 @@ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", "dev": true }, - "node_modules/loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "dev": true, - "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" - }, - "bin": { - "loose-envify": "cli.js" - } + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", + "dev": true }, "node_modules/lower-case": { "version": "2.0.2", @@ -4546,7 +5145,6 @@ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, - "peer": true, "dependencies": { "yallist": "^3.0.2" } @@ -4618,12 +5216,6 @@ "node": ">=8.6" } }, - "node_modules/mime": { - "version": "1.2.11", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz", - "integrity": "sha512-Ysa2F/nqTNGHhhm9MV8ure4+Hc+Y8AWiqUdHxsO7xu8zc92ND9f3kpALHjaP026Ft17UfxrMt95c50PLUeynBw==", - "dev": true - }, "node_modules/mime-db": { "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", @@ -4682,27 +5274,9 @@ } }, 
"node_modules/mitt": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.0.tgz", - "integrity": "sha512-7dX2/10ITVyqh4aOSVI9gdape+t9l2/8QxHrFmUXu4EEUpdlxl6RudZUPZoc+zuY2hk1j7XxVroIVIan/pD/SQ==", - "dev": true - }, - "node_modules/mkdirp": { - "version": "0.5.6", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", - "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", - "dev": true, - "dependencies": { - "minimist": "^1.2.6" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/mkdirp-classic": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", - "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.1.tgz", + "integrity": "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==", "dev": true }, "node_modules/ms": { @@ -4755,27 +5329,7 @@ "dev": true, "dependencies": { "lower-case": "^2.0.2", - "tslib": "^2.0.3" - } - }, - "node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "dev": true, - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } + "tslib": "^2.0.3" } }, "node_modules/node-forge": { @@ -4788,9 +5342,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.13", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", - "integrity": 
"sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==", + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==", "dev": true }, "node_modules/normalize-path": { @@ -4826,15 +5380,6 @@ "url": "https://github.com/fb55/nth-check?sponsor=1" } }, - "node_modules/oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", - "dev": true, - "engines": { - "node": "*" - } - }, "node_modules/object-inspect": { "version": "1.12.3", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", @@ -4912,24 +5457,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/os-homedir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", - "integrity": "sha512-B5JU3cabzk8c67mRRd3ECmROafjYMXbuzlwtqdM8IbS8ktlTix8aFGb2bAGKrSRIlnfKwovGUUr72JUPyOb6kQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/p-limit": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", @@ -4983,9 +5510,9 @@ } }, "node_modules/pac-proxy-agent": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.0.0.tgz", - "integrity": 
"sha512-t4tRAMx0uphnZrio0S0Jw9zg3oDbz1zVhQ/Vy18FjLfP1XOLNUEjaVxYCYRI6NS+BsMBXKIzV6cTLOkO9AtywA==", + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.0.2.tgz", + "integrity": "sha512-BFi3vZnO9X5Qt6NRz7ZOaPja3ic0PhlsmCRYLOpN11+mWBCR6XJDqW5RF3j8jm4WGGQZtBA+bTfxYzeKW73eHg==", "dev": true, "dependencies": { "@tootallnate/quickjs-emscripten": "^0.23.0", @@ -4993,22 +5520,21 @@ "debug": "^4.3.4", "get-uri": "^6.0.1", "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.0", - "pac-resolver": "^7.0.0", - "socks-proxy-agent": "^8.0.1" + "https-proxy-agent": "^7.0.5", + "pac-resolver": "^7.0.1", + "socks-proxy-agent": "^8.0.4" }, "engines": { "node": ">= 14" } }, "node_modules/pac-resolver": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.0.tgz", - "integrity": "sha512-Fd9lT9vJbHYRACT8OhCbZBbxr6KRSawSovFpy8nDGshaK99S/EBhVIHp9+crhxrsZOuvLpgL1n23iyPg6Rl2hg==", + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.1.tgz", + "integrity": "sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==", "dev": true, "dependencies": { "degenerator": "^5.0.0", - "ip": "^1.1.8", "netmask": "^2.0.2" }, "engines": { @@ -5113,15 +5639,6 @@ "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==", "dev": true }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/pause-stream": { "version": "0.0.11", "resolved": "https://registry.npmjs.org/pause-stream/-/pause-stream-0.0.11.tgz", @@ -5137,16 +5654,10 @@ "integrity": 
"sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", "dev": true }, - "node_modules/performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==", - "dev": true - }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", + "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==", "dev": true }, "node_modules/picomatch": { @@ -5186,15 +5697,6 @@ "renderkid": "^3.0.0" } }, - "node_modules/private": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", - "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==", - "dev": true, - "engines": { - "node": ">= 0.6" - } - }, "node_modules/process-nextick-args": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", @@ -5233,19 +5735,19 @@ } }, "node_modules/proxy-agent": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.3.0.tgz", - "integrity": "sha512-0LdR757eTj/JfuU7TL2YCuAZnxWXu3tkJbg4Oq3geW/qFNT/32T0sp2HnZ9O0lMR4q3vwAt0+xCA8SR0WAD0og==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.4.0.tgz", + "integrity": "sha512-u0piLU+nCOHMgGjRbimiXmA9kM/L9EHh3zL81xCdp7m+Y2pHIsnmbdDoEDoAz5geaonNR6q6+yOPQs6n4T6sBQ==", "dev": true, "dependencies": { "agent-base": 
"^7.0.2", "debug": "^4.3.4", - "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.0", + "http-proxy-agent": "^7.0.1", + "https-proxy-agent": "^7.0.3", "lru-cache": "^7.14.1", - "pac-proxy-agent": "^7.0.0", + "pac-proxy-agent": "^7.0.1", "proxy-from-env": "^1.1.0", - "socks-proxy-agent": "^8.0.1" + "socks-proxy-agent": "^8.0.2" }, "engines": { "node": ">= 14" @@ -5281,16 +5783,10 @@ "node": ">= 0.10" } }, - "node_modules/psl": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", - "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", - "dev": true - }, "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.2.tgz", + "integrity": "sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw==", "dev": true, "dependencies": { "end-of-stream": "^1.1.0", @@ -5307,38 +5803,85 @@ } }, "node_modules/puppeteer": { - "version": "20.9.0", - "resolved": "https://registry.npmjs.org/puppeteer/-/puppeteer-20.9.0.tgz", - "integrity": "sha512-kAglT4VZ9fWEGg3oLc4/de+JcONuEJhlh3J6f5R1TLkrY/EHHIHxWXDOzXvaxQCtedmyVXBwg8M+P8YCO/wZjw==", + "version": "23.4.0", + "resolved": "https://registry.npmjs.org/puppeteer/-/puppeteer-23.4.0.tgz", + "integrity": "sha512-FxgFFJI7NAsX8uebiEDSjS86vufz9TaqERQHShQT0lCbSRI3jUPEcz/0HdwLiYvfYNsc1zGjqY3NsGZya4PvUA==", "dev": true, "hasInstallScript": true, "dependencies": { - "@puppeteer/browsers": "1.4.6", - "cosmiconfig": "8.2.0", - "puppeteer-core": "20.9.0" + "@puppeteer/browsers": "2.4.0", + "chromium-bidi": "0.6.5", + "cosmiconfig": "^9.0.0", + "devtools-protocol": "0.0.1342118", + "puppeteer-core": "23.4.0", + 
"typed-query-selector": "^2.12.0" + }, + "bin": { + "puppeteer": "lib/cjs/puppeteer/node/cli.js" }, "engines": { - "node": ">=16.3.0" + "node": ">=18" } }, "node_modules/puppeteer-core": { - "version": "20.9.0", - "resolved": "https://registry.npmjs.org/puppeteer-core/-/puppeteer-core-20.9.0.tgz", - "integrity": "sha512-H9fYZQzMTRrkboEfPmf7m3CLDN6JvbxXA3qTtS+dFt27tR+CsFHzPsT6pzp6lYL6bJbAPaR0HaPO6uSi+F94Pg==", + "version": "23.4.0", + "resolved": "https://registry.npmjs.org/puppeteer-core/-/puppeteer-core-23.4.0.tgz", + "integrity": "sha512-fqkIP5FOcb38jfBj/OcBz1wFaI9nk40uQKSORvnXws6wCbep2dg8yxZ3ddJxBIfQsxoiEOvnrykFinUScrB/ew==", "dev": true, "dependencies": { - "@puppeteer/browsers": "1.4.6", - "chromium-bidi": "0.4.16", - "cross-fetch": "4.0.0", - "debug": "4.3.4", - "devtools-protocol": "0.0.1147663", - "ws": "8.13.0" + "@puppeteer/browsers": "2.4.0", + "chromium-bidi": "0.6.5", + "debug": "^4.3.7", + "devtools-protocol": "0.0.1342118", + "typed-query-selector": "^2.12.0", + "ws": "^8.18.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/puppeteer-core/node_modules/debug": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", + "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "dev": true, + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/puppeteer-core/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, + "node_modules/puppeteer/node_modules/cosmiconfig": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-9.0.0.tgz", + "integrity": 
"sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==", + "dev": true, + "dependencies": { + "env-paths": "^2.2.1", + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0" }, "engines": { - "node": ">=16.3.0" + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" }, "peerDependencies": { - "typescript": ">= 4.7.4" + "typescript": ">=4.9.5" }, "peerDependenciesMeta": { "typescript": { @@ -5346,13 +5889,19 @@ } } }, - "node_modules/qs": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", - "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==", + "node_modules/puppeteer/node_modules/typescript": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.2.tgz", + "integrity": "sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==", "dev": true, + "optional": true, + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, "engines": { - "node": ">=0.6" + "node": ">=14.17" } }, "node_modules/queue-tick": { @@ -5447,59 +5996,16 @@ "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", "dev": true }, - "node_modules/regenerator-runtime": { - "version": "0.11.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", - "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==", - "dev": true - }, - "node_modules/regenerator-transform": { - "version": "0.10.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.10.1.tgz", - "integrity": "sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==", - "dev": true, - 
"dependencies": { - "babel-runtime": "^6.18.0", - "babel-types": "^6.19.0", - "private": "^0.1.6" - } - }, - "node_modules/regexpu-core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-2.0.0.tgz", - "integrity": "sha512-tJ9+S4oKjxY8IZ9jmjnp/mtytu1u3iyIQAfmI51IKWH6bFf7XR1ybtaO6j7INhZKXOTYADk7V5qxaqLkmNxiZQ==", - "dev": true, - "dependencies": { - "regenerate": "^1.2.1", - "regjsgen": "^0.2.0", - "regjsparser": "^0.1.4" - } - }, - "node_modules/regjsgen": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.2.0.tgz", - "integrity": "sha512-x+Y3yA24uF68m5GA+tBjbGYo64xXVJpbToBaWCoSNSc1hdk6dfctaRWrNFTVJZIIhL5GxW8zwjoixbnifnK59g==", - "dev": true - }, - "node_modules/regjsparser": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.1.5.tgz", - "integrity": "sha512-jlQ9gYLfk2p3V5Ag5fYhA7fv7OHzd1KUH0PRP46xc3TgwjwgROIW572AfYg/X9kaNq/LJnu6oJcFRXlIrGoTRw==", + "node_modules/regenerate-unicode-properties": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz", + "integrity": "sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA==", "dev": true, "dependencies": { - "jsesc": "~0.5.0" + "regenerate": "^1.4.2" }, - "bin": { - "regjsparser": "bin/parser" - } - }, - "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", - "dev": true, - "bin": { - "jsesc": "bin/jsesc" + "engines": { + "node": ">=4" } }, "node_modules/relateurl": { @@ -5524,50 +6030,6 @@ "strip-ansi": "^6.0.1" } }, - "node_modules/repeating": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", - "integrity": "sha512-ZqtSMuVybkISo2OWvqvm7iHSWngvdaW3IpsT9/uP8v4gMi591LY6h35wdOfvQdWCKFWZWm2Y1Opp4kV7vQKT6A==", - "dev": true, - "dependencies": { - "is-finite": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/request": { - "version": "2.88.2", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", - "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", - "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142", - "dev": true, - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", @@ -5774,7 +6236,6 @@ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, - "peer": true, "bin": { "semver": "bin/semver.js" } @@ -6006,15 +6467,6 @@ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true }, - "node_modules/slash": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", - "integrity": 
"sha512-3TYDR7xWt4dIqV2JauJr+EJeW356RXijHeUlO+8djJ+uBXPn8/2dpzBc8yQhh583sVvc9CvFAeQVgijsH+PNNg==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/smart-buffer": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", @@ -6046,57 +6498,42 @@ } }, "node_modules/socks": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/socks/-/socks-2.7.1.tgz", - "integrity": "sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ==", + "version": "2.8.3", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.3.tgz", + "integrity": "sha512-l5x7VUUWbjVFbafGLxPWkYsHIhEvmF85tbIeFZWc8ZPtoMyybuEhL7Jye/ooC4/d48FgOjSJXgsF/AJPYCW8Zw==", "dev": true, "dependencies": { - "ip": "^2.0.0", + "ip-address": "^9.0.5", "smart-buffer": "^4.2.0" }, "engines": { - "node": ">= 10.13.0", + "node": ">= 10.0.0", "npm": ">= 3.0.0" } }, "node_modules/socks-proxy-agent": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.1.tgz", - "integrity": "sha512-59EjPbbgg8U3x62hhKOFVAmySQUcfRQ4C7Q/D5sEHnZTQRrQlNKINks44DMR1gwXp0p4LaVIeccX2KHTTcHVqQ==", + "version": "8.0.4", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.4.tgz", + "integrity": "sha512-GNAq/eg8Udq2x0eNiFkr9gRg5bA7PXEWagQdeRX4cPSG+X/8V38v637gim9bjFptMk1QWsCTr0ttrJEiXbNnRw==", "dev": true, "dependencies": { - "agent-base": "^7.0.1", + "agent-base": "^7.1.1", "debug": "^4.3.4", - "socks": "^2.7.1" + "socks": "^2.8.3" }, "engines": { "node": ">= 14" } }, - "node_modules/socks/node_modules/ip": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ip/-/ip-2.0.0.tgz", - "integrity": "sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==", - "dev": true - }, "node_modules/source-map": { - "version": "0.5.7", - "resolved": 
"https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", - "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, "engines": { "node": ">=0.10.0" } }, - "node_modules/source-map-support": { - "version": "0.4.18", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.4.18.tgz", - "integrity": "sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==", - "dev": true, - "dependencies": { - "source-map": "^0.5.6" - } - }, "node_modules/spdy": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", @@ -6139,30 +6576,11 @@ "node": "*" } }, - "node_modules/sshpk": { - "version": "1.17.0", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz", - "integrity": "sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==", - "dev": true, - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "bin": { - "sshpk-conv": "bin/sshpk-conv", - "sshpk-sign": "bin/sshpk-sign", - "sshpk-verify": "bin/sshpk-verify" - }, - "engines": { - "node": ">=0.10.0" - } + "node_modules/sprintf-js": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", + "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==", + "dev": true }, "node_modules/start-server-and-test": { "version": "2.0.3", @@ -6207,13 +6625,17 @@ } }, 
"node_modules/streamx": { - "version": "2.15.1", - "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.15.1.tgz", - "integrity": "sha512-fQMzy2O/Q47rgwErk/eGeLu/roaFWV0jVsogDmrszM9uIw8L5OA+t+V93MgYlufNptfjmYR1tOMWhei/Eh7TQA==", + "version": "2.20.1", + "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.20.1.tgz", + "integrity": "sha512-uTa0mU6WUC65iUvzKH4X9hEdvSW7rbPxPtwfWiLMSj3qTdQbAiUboZTxauKfpFuGIGa1C2BYijZ7wgdUXICJhA==", "dev": true, "dependencies": { - "fast-fifo": "^1.1.0", - "queue-tick": "^1.0.1" + "fast-fifo": "^1.3.2", + "queue-tick": "^1.0.1", + "text-decoder": "^1.1.0" + }, + "optionalDependencies": { + "bare-events": "^2.2.0" } }, "node_modules/string_decoder": { @@ -6294,20 +6716,23 @@ } }, "node_modules/tar-fs": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.0.4.tgz", - "integrity": "sha512-5AFQU8b9qLfZCX9zp2duONhPmZv0hGYiBPJsyUdqMjzq/mqVpy/rEUSeHk1+YitmxugaptgBh5oDGU3VsAJq4w==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.0.6.tgz", + "integrity": "sha512-iokBDQQkUyeXhgPYaZxmczGPhnhXZ0CmrqI+MOb/WFGS9DW5wnfrLgtjUJBvz50vQ3qfRwJ62QVoCFu8mPVu5w==", "dev": true, "dependencies": { - "mkdirp-classic": "^0.5.2", "pump": "^3.0.0", "tar-stream": "^3.1.5" + }, + "optionalDependencies": { + "bare-fs": "^2.1.1", + "bare-path": "^2.1.0" } }, "node_modules/tar-stream": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.6.tgz", - "integrity": "sha512-B/UyjYwPpMBv+PaFSWAmtYjwdrlEaZQEhMIBFNC5oEG8lpiW8XjcSdmEaClj28ArfKScKHs2nshz3k2le6crsg==", + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz", + "integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==", "dev": true, "dependencies": { "b4a": "^1.6.4", @@ -6391,15 +6816,6 @@ "integrity": 
"sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", "dev": true }, - "node_modules/terser/node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/terser/node_modules/source-map-support": { "version": "0.5.21", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", @@ -6410,6 +6826,15 @@ "source-map": "^0.6.0" } }, + "node_modules/text-decoder": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/text-decoder/-/text-decoder-1.2.0.tgz", + "integrity": "sha512-n1yg1mOj9DNpk3NeZOx7T6jchTbyJS3i3cucbNN6FcdPriMZx7NsgrGpWWdWZZGxD7ES1XB+3uoqHMgOKaN+fg==", + "dev": true, + "dependencies": { + "b4a": "^1.6.4" + } + }, "node_modules/through": { "version": "2.3.8", "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", @@ -6427,7 +6852,6 @@ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", "dev": true, - "peer": true, "engines": { "node": ">=4" } @@ -6453,34 +6877,6 @@ "node": ">=0.6" } }, - "node_modules/tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dev": true, - "dependencies": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": 
"sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "dev": true - }, - "node_modules/trim-right": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/trim-right/-/trim-right-1.0.1.tgz", - "integrity": "sha512-WZGXGstmCWgeevgTL54hrCuw1dyMQIzWy7ZfqRJfSmJZBwklI15egmQytFP6bPidmw3M8d5yEowl1niq4vmqZw==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/ts-loader": { "version": "9.4.4", "resolved": "https://registry.npmjs.org/ts-loader/-/ts-loader-9.4.4.tgz", @@ -6658,24 +7054,6 @@ "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==", "dev": true }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", - "dev": true, - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==", - "dev": true - }, "node_modules/type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", @@ -6689,6 +7067,12 @@ "node": ">= 0.6" } }, + "node_modules/typed-query-selector": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/typed-query-selector/-/typed-query-selector-2.12.0.tgz", + "integrity": "sha512-SbklCd1F0EiZOyPiW192rrHZzZ5sBijB6xM+cpmrwDqObvdtunOHHIk9fCGsoK5JVIYXoyEp4iEdE3upFH3PAg==", + "dev": true + }, "node_modules/typescript": { "version": "4.7.4", "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.7.4.tgz", @@ -6712,13 +7096,53 @@ "through": 
"^2.3.8" } }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz", + "integrity": "sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dev": true, + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz", + "integrity": "sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "dev": true, + "engines": { + "node": ">=4" + } + }, "node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "version": "2.0.1", + 
"resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", "dev": true, "engines": { - "node": ">= 4.0.0" + "node": ">= 10.0.0" } }, "node_modules/unpipe": { @@ -6731,9 +7155,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", - "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", + "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", "dev": true, "funding": [ { @@ -6750,8 +7174,8 @@ } ], "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.1.2", + "picocolors": "^1.0.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -6769,6 +7193,12 @@ "punycode": "^2.1.0" } }, + "node_modules/urlpattern-polyfill": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/urlpattern-polyfill/-/urlpattern-polyfill-10.0.0.tgz", + "integrity": "sha512-H/A06tKD7sS1O1X2SshBVeA5FLycRpjqiBeqGKmBwBDBy28EnRjORxTNe269KSSr5un5qyWi1iL61wLxpd+ZOg==", + "dev": true + }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", @@ -6790,16 +7220,6 @@ "node": ">= 0.4.0" } }, - "node_modules/uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "deprecated": "Please upgrade to version 7 or higher. 
Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.", - "dev": true, - "bin": { - "uuid": "bin/uuid" - } - }, "node_modules/v8-compile-cache-lib": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", @@ -6815,20 +7235,6 @@ "node": ">= 0.8" } }, - "node_modules/verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, "node_modules/wait-on": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-7.2.0.tgz", @@ -6870,12 +7276,6 @@ "minimalistic-assert": "^1.0.0" } }, - "node_modules/webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "dev": true - }, "node_modules/webpack": { "version": "5.88.2", "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.88.2.tgz", @@ -7122,16 +7522,6 @@ "node": ">=0.8.0" } }, - "node_modules/whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "dev": true, - "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -7210,9 +7600,9 @@ "dev": true }, "node_modules/ws": { 
- "version": "8.13.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", - "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", "dev": true, "engines": { "node": ">=10.0.0" @@ -7243,13 +7633,12 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", - "dev": true, - "peer": true + "dev": true }, "node_modules/yargs": { - "version": "17.7.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz", - "integrity": "sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==", + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "dev": true, "dependencies": { "cliui": "^8.0.1", @@ -7303,6 +7692,15 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zod": { + "version": "3.23.8", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz", + "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } } } } diff --git a/ecosystem-tests/ts-browser-webpack/package.json b/ecosystem-tests/ts-browser-webpack/package.json index ac251790f..921495cb0 100644 --- a/ecosystem-tests/ts-browser-webpack/package.json +++ b/ecosystem-tests/ts-browser-webpack/package.json @@ -5,23 +5,23 @@ "description": "ts-browser-webpack", 
"scripts": { "tsc": "tsc", - "serve": "webpack-cli serve", + "serve": "webpack serve", "build": "webpack", "test": "ts-node src/test.ts", "test:ci": "start-server-and-test serve http://localhost:8080 test" }, "devDependencies": { - "babel-core": "^6.26.3", + "@babel/core": "^7.21.0", + "@babel/preset-env": "^7.21.0", + "@babel/preset-typescript": "^7.21.0", "babel-loader": "^9.1.2", - "babel-preset-es2015": "^6.24.1", "fastest-levenshtein": "^1.0.16", - "force": "^0.0.3", "html-webpack-plugin": "^5.5.3", - "puppeteer": "^20.8.3", + "puppeteer": "^23.4.0", "start-server-and-test": "^2.0.0", "ts-loader": "^9.4.3", "ts-node": "^10.9.1", - "typescript": "4.7.4", + "typescript": "^4.7.4", "webpack": "^5.87.0", "webpack-cli": "^5.0.2", "webpack-dev-server": "^4.15.1" diff --git a/ecosystem-tests/ts-browser-webpack/webpack.config.js b/ecosystem-tests/ts-browser-webpack/webpack.config.js index 4dec6efb4..0b5c3c7d5 100644 --- a/ecosystem-tests/ts-browser-webpack/webpack.config.js +++ b/ecosystem-tests/ts-browser-webpack/webpack.config.js @@ -25,13 +25,13 @@ module.exports = { { test: /\.ts$/, exclude: /node_modules/, - loader: 'ts-loader', + use: 'ts-loader', }, ], }, resolve: { - extensions: ['*', '.js', '.ts'], + extensions: ['.js', '.ts'], }, devtool: 'eval', @@ -42,4 +42,12 @@ module.exports = { filename: 'index.html', }), ], + + devServer: { + static: { + directory: publicPath, + }, + compress: true, + port: 8080, + }, }; From 9f86d2776d8c5578d46b23a2fc313e6fb13c70a0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 12:02:33 +0000 Subject: [PATCH 268/533] fix(audio): correct response_format translations type (#1097) --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 1 + src/resources/audio/audio.ts | 7 +++++++ src/resources/audio/index.ts | 2 +- src/resources/audio/transcriptions.ts | 6 +++--- src/resources/audio/translations.ts | 6 +++--- src/resources/index.ts | 2 +- 
tests/api-resources/audio/translations.test.ts | 2 +- 9 files changed, 19 insertions(+), 10 deletions(-) diff --git a/.stats.yml b/.stats.yml index 0151c5a10..e8bca3c6d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-de1981b64ac229493473670d618500c6362c195f1057eb7de00bd1bc9184fbd5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-073331021d48db6af646a3552ab0c682efe31b7fb4e59a109ed1ba539f9b89c5.yml diff --git a/api.md b/api.md index f38ab69be..39cf72f1d 100644 --- a/api.md +++ b/api.md @@ -108,6 +108,7 @@ Methods: Types: - AudioModel +- AudioResponseFormat ## Transcriptions diff --git a/src/index.ts b/src/index.ts index 7fed1dc8c..55f603608 100644 --- a/src/index.ts +++ b/src/index.ts @@ -298,6 +298,7 @@ export namespace OpenAI { export import Audio = API.Audio; export import AudioModel = API.AudioModel; + export import AudioResponseFormat = API.AudioResponseFormat; export import Moderations = API.Moderations; export import Moderation = API.Moderation; diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index 1f0269d03..a8b35d986 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -14,8 +14,15 @@ export class Audio extends APIResource { export type AudioModel = 'whisper-1'; +/** + * The format of the output, in one of these options: `json`, `text`, `srt`, + * `verbose_json`, or `vtt`. 
+ */ +export type AudioResponseFormat = 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; + export namespace Audio { export import AudioModel = AudioAPI.AudioModel; + export import AudioResponseFormat = AudioAPI.AudioResponseFormat; export import Transcriptions = TranscriptionsAPI.Transcriptions; export import Transcription = TranscriptionsAPI.Transcription; export import TranscriptionCreateParams = TranscriptionsAPI.TranscriptionCreateParams; diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index a7f935964..e8836470c 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { AudioModel, Audio } from './audio'; +export { AudioModel, AudioResponseFormat, Audio } from './audio'; export { SpeechModel, SpeechCreateParams, Speech } from './speech'; export { Transcription, TranscriptionCreateParams, Transcriptions } from './transcriptions'; export { Translation, TranslationCreateParams, Translations } from './translations'; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 5c30d6c59..1ee6921cd 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -54,10 +54,10 @@ export interface TranscriptionCreateParams { prompt?: string; /** - * The format of the transcript output, in one of these options: `json`, `text`, - * `srt`, `verbose_json`, or `vtt`. + * The format of the output, in one of these options: `json`, `text`, `srt`, + * `verbose_json`, or `vtt`. */ - response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; + response_format?: AudioAPI.AudioResponseFormat; /** * The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index dedc15b65..6df718112 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -40,10 +40,10 @@ export interface TranslationCreateParams { prompt?: string; /** - * The format of the transcript output, in one of these options: `json`, `text`, - * `srt`, `verbose_json`, or `vtt`. + * The format of the output, in one of these options: `json`, `text`, `srt`, + * `verbose_json`, or `vtt`. */ - response_format?: string; + response_format?: AudioAPI.AudioResponseFormat; /** * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the diff --git a/src/resources/index.ts b/src/resources/index.ts index 68bd88a31..87203ab39 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -2,7 +2,7 @@ export * from './chat/index'; export * from './shared'; -export { AudioModel, Audio } from './audio/audio'; +export { AudioModel, AudioResponseFormat, Audio } from './audio/audio'; export { Batch, BatchError, diff --git a/tests/api-resources/audio/translations.test.ts b/tests/api-resources/audio/translations.test.ts index 8264a5818..7966ff49a 100644 --- a/tests/api-resources/audio/translations.test.ts +++ b/tests/api-resources/audio/translations.test.ts @@ -28,7 +28,7 @@ describe('resource translations', () => { file: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'whisper-1', prompt: 'prompt', - response_format: 'response_format', + response_format: 'json', temperature: 0, }); }); From ee2bb62f055e588c5c8ac1a36348e52270f10c90 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 25 Sep 2024 13:15:06 +0100 Subject: [PATCH 269/533] chore(internal): fix ecosystem tests error output (#1096) --- ecosystem-tests/cli.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index 550512634..c03ea668a 100644 --- 
a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -506,8 +506,9 @@ async function run(command: string, args: string[], config?: RunOpts): Promise Date: Wed, 25 Sep 2024 14:31:47 +0000 Subject: [PATCH 270/533] feat(client): allow overriding retry count header (#1098) --- src/core.ts | 18 +++++++++++--- tests/index.test.ts | 58 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 3 deletions(-) diff --git a/src/core.ts b/src/core.ts index 877ae8de1..f0619c3d9 100644 --- a/src/core.ts +++ b/src/core.ts @@ -383,7 +383,11 @@ export abstract class APIClient { delete reqHeaders['content-type']; } - reqHeaders['x-stainless-retry-count'] = String(retryCount); + // Don't set the retry count header if it was already set or removed by the caller. We check `headers`, + // which can contain nulls, instead of `reqHeaders` to account for the removal case. + if (getHeader(headers, 'x-stainless-retry-count') === undefined) { + reqHeaders['x-stainless-retry-count'] = String(retryCount); + } this.validateHeaders(reqHeaders, headers); @@ -1170,7 +1174,15 @@ export const isHeadersProtocol = (headers: any): headers is HeadersProtocol => { return typeof headers?.get === 'function'; }; -export const getRequiredHeader = (headers: HeadersLike, header: string): string => { +export const getRequiredHeader = (headers: HeadersLike | Headers, header: string): string => { + const foundHeader = getHeader(headers, header); + if (foundHeader === undefined) { + throw new Error(`Could not find ${header} header`); + } + return foundHeader; +}; + +export const getHeader = (headers: HeadersLike | Headers, header: string): string | undefined => { const lowerCasedHeader = header.toLowerCase(); if (isHeadersProtocol(headers)) { // to deal with the case where the header looks like Stainless-Event-Id @@ -1196,7 +1208,7 @@ export const getRequiredHeader = (headers: HeadersLike, header: string): string } } - throw new Error(`Could not find ${header} header`); + return 
undefined; }; /** diff --git a/tests/index.test.ts b/tests/index.test.ts index a6fa97199..b55ec5f67 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -266,6 +266,64 @@ describe('retries', () => { expect(count).toEqual(3); }); + test('omit retry count header', async () => { + let count = 0; + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + count++; + if (count <= 2) { + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After': '0.1', + }, + }); + } + capturedRequest = init; + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + const client = new OpenAI({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 }); + + expect( + await client.request({ + path: '/foo', + method: 'get', + headers: { 'X-Stainless-Retry-Count': null }, + }), + ).toEqual({ a: 1 }); + + expect(capturedRequest!.headers as Headers).not.toHaveProperty('x-stainless-retry-count'); + }); + + test('overwrite retry count header', async () => { + let count = 0; + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + count++; + if (count <= 2) { + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After': '0.1', + }, + }); + } + capturedRequest = init; + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + const client = new OpenAI({ apiKey: 'My API Key', fetch: testFetch, maxRetries: 4 }); + + expect( + await client.request({ + path: '/foo', + method: 'get', + headers: { 'X-Stainless-Retry-Count': '42' }, + }), + ).toEqual({ a: 1 }); + + expect((capturedRequest!.headers as Headers)['x-stainless-retry-count']).toBe('42'); + }); + test('retry on 429 with retry-after', async () => { let count = 0; const testFetch = async (url: RequestInfo, { signal }: RequestInit = {}): Promise => { From 
b7db4b15fd6306100efc859cdf44497470e60afc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 14:32:16 +0000 Subject: [PATCH 271/533] release: 4.64.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 24 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 541459357..27e41843b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.63.0" + ".": "4.64.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index a9677f6d3..dd4553eb4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.64.0 (2024-09-25) + +Full Changelog: [v4.63.0...v4.64.0](https://github.com/openai/openai-node/compare/v4.63.0...v4.64.0) + +### Features + +* **client:** allow overriding retry count header ([#1098](https://github.com/openai/openai-node/issues/1098)) ([a466ff7](https://github.com/openai/openai-node/commit/a466ff78a436db82d79a8f53066a85a3b1dbe039)) + + +### Bug Fixes + +* **audio:** correct response_format translations type ([#1097](https://github.com/openai/openai-node/issues/1097)) ([9a5f461](https://github.com/openai/openai-node/commit/9a5f461306e84b62ce1ed8aedbfee90798def5fb)) + + +### Chores + +* **internal:** fix ecosystem tests error output ([#1096](https://github.com/openai/openai-node/issues/1096)) ([ecdb4e9](https://github.com/openai/openai-node/commit/ecdb4e923f94e828d8758559aea78c82417b8f12)) +* **internal:** fix slow ecosystem test ([#1093](https://github.com/openai/openai-node/issues/1093)) ([80ed9ec](https://github.com/openai/openai-node/commit/80ed9ecbd60129164cb407e46dddbc06ef1c54ab)) + ## 4.63.0 (2024-09-20) Full Changelog: [v4.62.1...v4.63.0](https://github.com/openai/openai-node/compare/v4.62.1...v4.63.0) diff --git a/README.md 
b/README.md index b2a3bc4b4..3d03b9c5d 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.63.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.64.0/mod.ts'; ``` diff --git a/package.json b/package.json index 831169e2d..fbd5bbc07 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.63.0", + "version": "4.64.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index f006e3f3f..fc61a2d35 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.63.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.64.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index ee209cb0e..ebc183c48 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.63.0'; // x-release-please-version +export const VERSION = '4.64.0'; // x-release-please-version From 43464dce83eb2732d195b3669d1cc8d0fc4e1528 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 26 Sep 2024 17:25:07 +0000 Subject: [PATCH 272/533] feat(api): add omni-moderation model (#1100) --- .stats.yml | 2 +- api.md | 3 + src/index.ts | 3 + src/resources/index.ts | 3 + src/resources/moderations.ts | 175 ++++++++++++++++++++++-- tests/api-resources/moderations.test.ts | 2 +- 6 files changed, 174 insertions(+), 14 deletions(-) diff --git a/.stats.yml b/.stats.yml index e8bca3c6d..0998368a4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-073331021d48db6af646a3552ab0c682efe31b7fb4e59a109ed1ba539f9b89c5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-17ddd746c775ca4d4fbe64e5621ac30756ef09c061ff6313190b6ec162222d4c.yml diff --git a/api.md b/api.md index 39cf72f1d..73ac38068 100644 --- a/api.md +++ b/api.md @@ -145,7 +145,10 @@ Methods: Types: - Moderation +- ModerationImageURLInput - ModerationModel +- ModerationMultiModalInput +- ModerationTextInput - ModerationCreateResponse Methods: diff --git a/src/index.ts b/src/index.ts index 55f603608..d3e1d2a78 100644 --- a/src/index.ts +++ b/src/index.ts @@ -302,7 +302,10 @@ export namespace OpenAI { export import Moderations = API.Moderations; export import Moderation = API.Moderation; + export import ModerationImageURLInput = API.ModerationImageURLInput; export import ModerationModel = API.ModerationModel; + export import ModerationMultiModalInput = API.ModerationMultiModalInput; + export import ModerationTextInput = API.ModerationTextInput; export import ModerationCreateResponse = API.ModerationCreateResponse; export import ModerationCreateParams = API.ModerationCreateParams; diff --git a/src/resources/index.ts b/src/resources/index.ts index 87203ab39..15c5db77f 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -52,7 +52,10 @@ export { export { Model, ModelDeleted, ModelsPage, Models } from './models'; export { Moderation, + ModerationImageURLInput, ModerationModel, + ModerationMultiModalInput, + ModerationTextInput, ModerationCreateResponse, ModerationCreateParams, Moderations, diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index f80bc7acb..ba800509e 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -6,7 +6,8 @@ import * as ModerationsAPI from './moderations'; export class Moderations extends APIResource { /** - * Classifies if text is potentially harmful. 
+ * Classifies if text and/or image inputs are potentially harmful. Learn more in + * the [moderation guide](https://platform.openai.com/docs/guides/moderation). */ create( body: ModerationCreateParams, @@ -22,6 +23,11 @@ export interface Moderation { */ categories: Moderation.Categories; + /** + * A list of the categories along with the input type(s) that the score applies to. + */ + category_applied_input_types: Moderation.CategoryAppliedInputTypes; + /** * A list of the categories along with their scores as predicted by model. */ @@ -65,6 +71,20 @@ export namespace Moderation { */ 'hate/threatening': boolean; + /** + * Content that includes instructions or advice that facilitate the planning or + * execution of wrongdoing, or that gives advice or instruction on how to commit + * illicit acts. For example, "how to shoplift" would fit this category. + */ + illicit: boolean; + + /** + * Content that includes instructions or advice that facilitate the planning or + * execution of wrongdoing that also includes violence, or that gives advice or + * instruction on the procurement of any weapon. + */ + 'illicit/violent': boolean; + /** * Content that promotes, encourages, or depicts acts of self-harm, such as * suicide, cutting, and eating disorders. @@ -107,6 +127,76 @@ export namespace Moderation { 'violence/graphic': boolean; } + /** + * A list of the categories along with the input type(s) that the score applies to. + */ + export interface CategoryAppliedInputTypes { + /** + * The applied input type(s) for the category 'harassment'. + */ + harassment: Array<'text'>; + + /** + * The applied input type(s) for the category 'harassment/threatening'. + */ + 'harassment/threatening': Array<'text'>; + + /** + * The applied input type(s) for the category 'hate'. + */ + hate: Array<'text'>; + + /** + * The applied input type(s) for the category 'hate/threatening'. + */ + 'hate/threatening': Array<'text'>; + + /** + * The applied input type(s) for the category 'illicit'. 
+ */ + illicit: Array<'text'>; + + /** + * The applied input type(s) for the category 'illicit/violent'. + */ + 'illicit/violent': Array<'text'>; + + /** + * The applied input type(s) for the category 'self-harm'. + */ + 'self-harm': Array<'text' | 'image'>; + + /** + * The applied input type(s) for the category 'self-harm/instructions'. + */ + 'self-harm/instructions': Array<'text' | 'image'>; + + /** + * The applied input type(s) for the category 'self-harm/intent'. + */ + 'self-harm/intent': Array<'text' | 'image'>; + + /** + * The applied input type(s) for the category 'sexual'. + */ + sexual: Array<'text' | 'image'>; + + /** + * The applied input type(s) for the category 'sexual/minors'. + */ + 'sexual/minors': Array<'text'>; + + /** + * The applied input type(s) for the category 'violence'. + */ + violence: Array<'text' | 'image'>; + + /** + * The applied input type(s) for the category 'violence/graphic'. + */ + 'violence/graphic': Array<'text' | 'image'>; + } + /** * A list of the categories along with their scores as predicted by model. */ @@ -131,6 +221,16 @@ export namespace Moderation { */ 'hate/threatening': number; + /** + * The score for the category 'illicit'. + */ + illicit: number; + + /** + * The score for the category 'illicit/violent'. + */ + 'illicit/violent': number; + /** * The score for the category 'self-harm'. */ @@ -168,7 +268,58 @@ export namespace Moderation { } } -export type ModerationModel = 'text-moderation-latest' | 'text-moderation-stable'; +/** + * An object describing an image to classify. + */ +export interface ModerationImageURLInput { + /** + * Contains either an image URL or a data URL for a base64 encoded image. + */ + image_url: ModerationImageURLInput.ImageURL; + + /** + * Always `image_url`. + */ + type: 'image_url'; +} + +export namespace ModerationImageURLInput { + /** + * Contains either an image URL or a data URL for a base64 encoded image. 
+ */ + export interface ImageURL { + /** + * Either a URL of the image or the base64 encoded image data. + */ + url: string; + } +} + +export type ModerationModel = + | 'omni-moderation-latest' + | 'omni-moderation-2024-09-26' + | 'text-moderation-latest' + | 'text-moderation-stable'; + +/** + * An object describing an image to classify. + */ +export type ModerationMultiModalInput = ModerationImageURLInput | ModerationTextInput; + +/** + * An object describing text to classify. + */ +export interface ModerationTextInput { + /** + * A string of text to classify. + */ + text: string; + + /** + * Always `text`. + */ + type: 'text'; +} /** * Represents if a given text input is potentially harmful. @@ -192,26 +343,26 @@ export interface ModerationCreateResponse { export interface ModerationCreateParams { /** - * The input text to classify + * Input (or inputs) to classify. Can be a single string, an array of strings, or + * an array of multi-modal input objects similar to other models. */ - input: string | Array; + input: string | Array | Array; /** - * Two content moderations models are available: `text-moderation-stable` and - * `text-moderation-latest`. - * - * The default is `text-moderation-latest` which will be automatically upgraded - * over time. This ensures you are always using our most accurate model. If you use - * `text-moderation-stable`, we will provide advanced notice before updating the - * model. Accuracy of `text-moderation-stable` may be slightly lower than for - * `text-moderation-latest`. + * The content moderation model you would like to use. Learn more in + * [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + * learn about available models + * [here](https://platform.openai.com/docs/models/moderation). 
*/ model?: (string & {}) | ModerationModel; } export namespace Moderations { export import Moderation = ModerationsAPI.Moderation; + export import ModerationImageURLInput = ModerationsAPI.ModerationImageURLInput; export import ModerationModel = ModerationsAPI.ModerationModel; + export import ModerationMultiModalInput = ModerationsAPI.ModerationMultiModalInput; + export import ModerationTextInput = ModerationsAPI.ModerationTextInput; export import ModerationCreateResponse = ModerationsAPI.ModerationCreateResponse; export import ModerationCreateParams = ModerationsAPI.ModerationCreateParams; } diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts index 0df1f0371..64f9acf3c 100644 --- a/tests/api-resources/moderations.test.ts +++ b/tests/api-resources/moderations.test.ts @@ -23,7 +23,7 @@ describe('resource moderations', () => { test('create: required and optional params', async () => { const response = await client.moderations.create({ input: 'I want to kill them.', - model: 'text-moderation-stable', + model: 'omni-moderation-2024-09-26', }); }); }); From 453141d1af377d5251929c1d505c3a1e3c2b9ca0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 26 Sep 2024 17:25:34 +0000 Subject: [PATCH 273/533] release: 4.65.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 27e41843b..98144b04e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.64.0" + ".": "4.65.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index dd4553eb4..d05b606e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.65.0 (2024-09-26) + +Full Changelog: 
[v4.64.0...v4.65.0](https://github.com/openai/openai-node/compare/v4.64.0...v4.65.0) + +### Features + +* **api:** add omni-moderation model ([#1100](https://github.com/openai/openai-node/issues/1100)) ([66c0f21](https://github.com/openai/openai-node/commit/66c0f21fad3be9c57b810c4a7eebb71eb6ccbcc1)) + ## 4.64.0 (2024-09-25) Full Changelog: [v4.63.0...v4.64.0](https://github.com/openai/openai-node/compare/v4.63.0...v4.64.0) diff --git a/README.md b/README.md index 3d03b9c5d..7bcb0dcda 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.64.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.65.0/mod.ts'; ``` diff --git a/package.json b/package.json index fbd5bbc07..03825be64 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.64.0", + "version": "4.65.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index fc61a2d35..cbedabbb5 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.64.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.65.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index ebc183c48..db5c3bcf7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.64.0'; // x-release-please-version +export const VERSION = '4.65.0'; // x-release-please-version From 9e6c55a53940e88ae6af6033980081d5e5ea7d80 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 27 Sep 2024 16:02:33 +0100 Subject: [PATCH 274/533] feat(client): add request_id to `.withResponse()` (#1095) --- src/core.ts | 9 ++++++--- 
tests/responses.test.ts | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/src/core.ts b/src/core.ts index f0619c3d9..c104b61d1 100644 --- a/src/core.ts +++ b/src/core.ts @@ -138,8 +138,11 @@ export class APIPromise extends Promise> { asResponse(): Promise { return this.responsePromise.then((p) => p.response); } + /** - * Gets the parsed response data and the raw `Response` instance. + * Gets the parsed response data, the raw `Response` instance and the ID of the request, + * returned via the X-Request-ID header which is useful for debugging requests and reporting + * issues to OpenAI. * * If you just want to get the raw `Response` instance without parsing it, * you can use {@link asResponse()}. @@ -151,9 +154,9 @@ export class APIPromise extends Promise> { * - `import 'openai/shims/node'` (if you're running on Node) * - `import 'openai/shims/web'` (otherwise) */ - async withResponse(): Promise<{ data: T; response: Response }> { + async withResponse(): Promise<{ data: T; response: Response; request_id: string | null | undefined }> { const [data, response] = await Promise.all([this.parse(), this.asResponse()]); - return { data, response }; + return { data, response, request_id: response.headers.get('x-request-id') }; } private parse(): Promise> { diff --git a/tests/responses.test.ts b/tests/responses.test.ts index fbd073a79..527763465 100644 --- a/tests/responses.test.ts +++ b/tests/responses.test.ts @@ -41,6 +41,27 @@ describe('request id', () => { compareType>>, Array<{ foo: string }>>(true); }); + test('withResponse', async () => { + const client = new OpenAI({ + apiKey: 'dummy', + fetch: async () => + new Response(JSON.stringify({ id: 'bar' }), { + headers: { 'x-request-id': 'req_id_xxx', 'content-type': 'application/json' }, + }), + }); + + const { + data: completion, + response, + request_id, + } = await client.chat.completions.create({ messages: [], model: 'gpt-4' }).withResponse(); + + 
expect(request_id).toBe('req_id_xxx'); + expect(response.headers.get('x-request-id')).toBe('req_id_xxx'); + expect(completion.id).toBe('bar'); + expect(JSON.stringify(completion)).toBe('{"id":"bar"}'); + }); + test('object response', async () => { const client = new OpenAI({ apiKey: 'dummy', From 6792170ace26391e9e3af3ec81fb6b0b94817cb3 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 27 Sep 2024 23:52:15 +0100 Subject: [PATCH 275/533] fix(audio): correct types for transcriptions / translations (#1104) --- .stats.yml | 2 +- api.md | 10 +- src/resources/audio/audio.ts | 6 ++ src/resources/audio/index.ts | 18 +++- src/resources/audio/transcriptions.ts | 136 +++++++++++++++++++++++++- src/resources/audio/translations.ts | 50 +++++++++- 6 files changed, 211 insertions(+), 11 deletions(-) diff --git a/.stats.yml b/.stats.yml index 0998368a4..68789976b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-17ddd746c775ca4d4fbe64e5621ac30756ef09c061ff6313190b6ec162222d4c.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-71e58a77027c67e003fdd1b1ac8ac11557d8bfabc7666d1a827c6b1ca8ab98b5.yml diff --git a/api.md b/api.md index 73ac38068..71027acfd 100644 --- a/api.md +++ b/api.md @@ -115,20 +115,26 @@ Types: Types: - Transcription +- TranscriptionSegment +- TranscriptionVerbose +- TranscriptionWord +- TranscriptionCreateResponse Methods: -- client.audio.transcriptions.create({ ...params }) -> Transcription +- client.audio.transcriptions.create({ ...params }) -> TranscriptionCreateResponse ## Translations Types: - Translation +- TranslationVerbose +- TranslationCreateResponse Methods: -- client.audio.translations.create({ ...params }) -> Translation +- client.audio.translations.create({ ...params }) -> TranslationCreateResponse ## Speech diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index 
a8b35d986..9c2c2b982 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -25,9 +25,15 @@ export namespace Audio { export import AudioResponseFormat = AudioAPI.AudioResponseFormat; export import Transcriptions = TranscriptionsAPI.Transcriptions; export import Transcription = TranscriptionsAPI.Transcription; + export import TranscriptionSegment = TranscriptionsAPI.TranscriptionSegment; + export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; + export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; + export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; export import TranscriptionCreateParams = TranscriptionsAPI.TranscriptionCreateParams; export import Translations = TranslationsAPI.Translations; export import Translation = TranslationsAPI.Translation; + export import TranslationVerbose = TranslationsAPI.TranslationVerbose; + export import TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse; export import TranslationCreateParams = TranslationsAPI.TranslationCreateParams; export import Speech = SpeechAPI.Speech; export import SpeechModel = SpeechAPI.SpeechModel; diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index e8836470c..952c05b03 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -2,5 +2,19 @@ export { AudioModel, AudioResponseFormat, Audio } from './audio'; export { SpeechModel, SpeechCreateParams, Speech } from './speech'; -export { Transcription, TranscriptionCreateParams, Transcriptions } from './transcriptions'; -export { Translation, TranslationCreateParams, Translations } from './translations'; +export { + Transcription, + TranscriptionSegment, + TranscriptionVerbose, + TranscriptionWord, + TranscriptionCreateResponse, + TranscriptionCreateParams, + Transcriptions, +} from './transcriptions'; +export { + Translation, + TranslationVerbose, + TranslationCreateResponse, + 
TranslationCreateParams, + Translations, +} from './translations'; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 1ee6921cd..e230bc4a4 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -9,7 +9,22 @@ export class Transcriptions extends APIResource { /** * Transcribes audio into the input language. */ - create(body: TranscriptionCreateParams, options?: Core.RequestOptions): Core.APIPromise { + create( + body: TranscriptionCreateParams<'json' | undefined>, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranscriptionCreateParams<'verbose_json'>, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranscriptionCreateParams<'srt' | 'vtt' | 'text'>, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranscriptionCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/audio/transcriptions', Core.multipartFormRequestOptions({ body, ...options })); } } @@ -25,7 +40,118 @@ export interface Transcription { text: string; } -export interface TranscriptionCreateParams { +export interface TranscriptionSegment { + /** + * Unique identifier of the segment. + */ + id: number; + + /** + * Average logprob of the segment. If the value is lower than -1, consider the + * logprobs failed. + */ + avg_logprob: number; + + /** + * Compression ratio of the segment. If the value is greater than 2.4, consider the + * compression failed. + */ + compression_ratio: number; + + /** + * End time of the segment in seconds. + */ + end: number; + + /** + * Probability of no speech in the segment. If the value is higher than 1.0 and the + * `avg_logprob` is below -1, consider this segment silent. + */ + no_speech_prob: number; + + /** + * Seek offset of the segment. + */ + seek: number; + + /** + * Start time of the segment in seconds. 
+ */ + start: number; + + /** + * Temperature parameter used for generating the segment. + */ + temperature: number; + + /** + * Text content of the segment. + */ + text: string; + + /** + * Array of token IDs for the text content. + */ + tokens: Array; +} + +/** + * Represents a verbose json transcription response returned by model, based on the + * provided input. + */ +export interface TranscriptionVerbose { + /** + * The duration of the input audio. + */ + duration: string; + + /** + * The language of the input audio. + */ + language: string; + + /** + * The transcribed text. + */ + text: string; + + /** + * Segments of the transcribed text and their corresponding details. + */ + segments?: Array; + + /** + * Extracted words and their corresponding timestamps. + */ + words?: Array; +} + +export interface TranscriptionWord { + /** + * End time of the word in seconds. + */ + end: number; + + /** + * Start time of the word in seconds. + */ + start: number; + + /** + * The text content of the word. + */ + word: string; +} + +/** + * Represents a transcription response returned by model, based on the provided + * input. + */ +export type TranscriptionCreateResponse = Transcription | TranscriptionVerbose; + +export interface TranscriptionCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> { /** * The audio file object (not file name) to transcribe, in one of these formats: * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. @@ -57,7 +183,7 @@ export interface TranscriptionCreateParams { * The format of the output, in one of these options: `json`, `text`, `srt`, * `verbose_json`, or `vtt`. */ - response_format?: AudioAPI.AudioResponseFormat; + response_format?: ResponseFormat; /** * The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the @@ -80,5 +206,9 @@ export interface TranscriptionCreateParams { export namespace Transcriptions { export import Transcription = TranscriptionsAPI.Transcription; + export import TranscriptionSegment = TranscriptionsAPI.TranscriptionSegment; + export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; + export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; + export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; export import TranscriptionCreateParams = TranscriptionsAPI.TranscriptionCreateParams; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 6df718112..819804332 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -4,12 +4,28 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; import * as TranslationsAPI from './translations'; import * as AudioAPI from './audio'; +import * as TranscriptionsAPI from './transcriptions'; export class Translations extends APIResource { /** * Translates audio into English. */ - create(body: TranslationCreateParams, options?: Core.RequestOptions): Core.APIPromise { + create( + body: TranslationCreateParams<'json' | undefined>, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranslationCreateParams<'verbose_json'>, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranslationCreateParams<'text' | 'srt' | 'vtt'>, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranslationCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { return this._client.post('/audio/translations', Core.multipartFormRequestOptions({ body, ...options })); } } @@ -18,7 +34,33 @@ export interface Translation { text: string; } -export interface TranslationCreateParams { +export interface TranslationVerbose { + /** + * The duration of the input audio. 
+ */ + duration: string; + + /** + * The language of the output translation (always `english`). + */ + language: string; + + /** + * The translated text. + */ + text: string; + + /** + * Segments of the translated text and their corresponding details. + */ + segments?: Array; +} + +export type TranslationCreateResponse = Translation | TranslationVerbose; + +export interface TranslationCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> { /** * The audio file object (not file name) translate, in one of these formats: flac, * mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. @@ -43,7 +85,7 @@ export interface TranslationCreateParams { * The format of the output, in one of these options: `json`, `text`, `srt`, * `verbose_json`, or `vtt`. */ - response_format?: AudioAPI.AudioResponseFormat; + response_format?: ResponseFormat; /** * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the @@ -57,5 +99,7 @@ export interface TranslationCreateParams { export namespace Translations { export import Translation = TranslationsAPI.Translation; + export import TranslationVerbose = TranslationsAPI.TranslationVerbose; + export import TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse; export import TranslationCreateParams = TranslationsAPI.TranslationCreateParams; } From 806a873244cf9edac65d4c7999205a55e2a1b48d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Sep 2024 23:01:18 +0000 Subject: [PATCH 276/533] release: 4.66.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 98144b04e..efb39fad4 100644 --- a/.release-please-manifest.json +++ 
b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.65.0" + ".": "4.66.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d05b606e7..61e398258 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.66.0 (2024-09-27) + +Full Changelog: [v4.65.0...v4.66.0](https://github.com/openai/openai-node/compare/v4.65.0...v4.66.0) + +### Features + +* **client:** add request_id to `.withResponse()` ([#1095](https://github.com/openai/openai-node/issues/1095)) ([2d0f565](https://github.com/openai/openai-node/commit/2d0f565f124a8862bc24214cc3ddce9db0ba75bc)) + + +### Bug Fixes + +* **audio:** correct types for transcriptions / translations ([#1104](https://github.com/openai/openai-node/issues/1104)) ([96e86c2](https://github.com/openai/openai-node/commit/96e86c214ba79d50035b61e5daa3489f082512c4)) +* **client:** correct types for transcriptions / translations ([#1105](https://github.com/openai/openai-node/issues/1105)) ([fa16ebb](https://github.com/openai/openai-node/commit/fa16ebbb314ebc7c274d27f0148d248edf48e055)) + ## 4.65.0 (2024-09-26) Full Changelog: [v4.64.0...v4.65.0](https://github.com/openai/openai-node/compare/v4.64.0...v4.65.0) diff --git a/README.md b/README.md index 7bcb0dcda..1058dce4c 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.65.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.66.0/mod.ts'; ``` diff --git a/package.json b/package.json index 03825be64..b8cdcb658 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.65.0", + "version": "4.66.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index cbedabbb5..04f414a32 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from 
https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.65.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.66.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index db5c3bcf7..3da94762b 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.65.0'; // x-release-please-version +export const VERSION = '4.66.0'; // x-release-please-version From 7c6c1e29e1cb32648dc1dcc585d7f49f4b7c6438 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 30 Sep 2024 10:33:47 -0400 Subject: [PATCH 277/533] fix(audio): use export type --- src/resources/audio/audio.ts | 12 ++++++++++-- src/resources/audio/transcriptions.ts | 6 +++++- src/resources/audio/translations.ts | 6 +++++- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index 9c2c2b982..e06e28094 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -29,12 +29,20 @@ export namespace Audio { export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; - export import TranscriptionCreateParams = TranscriptionsAPI.TranscriptionCreateParams; + export type TranscriptionCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = + | AudioAPI.AudioResponseFormat + | undefined, + > = TranscriptionsAPI.TranscriptionCreateParams; export import Translations = TranslationsAPI.Translations; export import Translation = TranslationsAPI.Translation; export import TranslationVerbose = TranslationsAPI.TranslationVerbose; export import TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse; - export import TranslationCreateParams = TranslationsAPI.TranslationCreateParams; 
+ export type TranslationCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = + | AudioAPI.AudioResponseFormat + | undefined, + > = TranslationsAPI.TranslationCreateParams; export import Speech = SpeechAPI.Speech; export import SpeechModel = SpeechAPI.SpeechModel; export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index e230bc4a4..bdbfc6268 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -210,5 +210,9 @@ export namespace Transcriptions { export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; - export import TranscriptionCreateParams = TranscriptionsAPI.TranscriptionCreateParams; + export type TranscriptionCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = + | AudioAPI.AudioResponseFormat + | undefined, + > = TranscriptionsAPI.TranscriptionCreateParams; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 819804332..a76c72351 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -101,5 +101,9 @@ export namespace Translations { export import Translation = TranslationsAPI.Translation; export import TranslationVerbose = TranslationsAPI.TranslationVerbose; export import TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse; - export import TranslationCreateParams = TranslationsAPI.TranslationCreateParams; + export type TranslationCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = + | AudioAPI.AudioResponseFormat + | undefined, + > = TranslationsAPI.TranslationCreateParams; } From fcbad2c5eaeabd368528a8bd26ea08fd62c690b5 Mon Sep 17 00:00:00 2001 From: 
Robert Craigie Date: Mon, 30 Sep 2024 10:12:52 -0400 Subject: [PATCH 278/533] fix(audio): add fallback overload types --- src/resources/audio/transcriptions.ts | 1 + src/resources/audio/translations.ts | 1 + 2 files changed, 2 insertions(+) diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index bdbfc6268..902dc9e5f 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -21,6 +21,7 @@ export class Transcriptions extends APIResource { body: TranscriptionCreateParams<'srt' | 'vtt' | 'text'>, options?: Core.RequestOptions, ): Core.APIPromise; + create(body: TranscriptionCreateParams, options?: Core.RequestOptions): Core.APIPromise; create( body: TranscriptionCreateParams, options?: Core.RequestOptions, diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index a76c72351..36c2dc7c2 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -22,6 +22,7 @@ export class Translations extends APIResource { body: TranslationCreateParams<'text' | 'srt' | 'vtt'>, options?: Core.RequestOptions, ): Core.APIPromise; + create(body: TranslationCreateParams, options?: Core.RequestOptions): Core.APIPromise; create( body: TranslationCreateParams, options?: Core.RequestOptions, From 52c0bb506d418323f1f78ed197ebcd97ee948a7c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 14:47:44 +0000 Subject: [PATCH 279/533] release: 4.66.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index efb39fad4..933d4022c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.66.0" + ".": "4.66.1" } 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 61e398258..e927252ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.66.1 (2024-09-30) + +Full Changelog: [v4.66.0...v4.66.1](https://github.com/openai/openai-node/compare/v4.66.0...v4.66.1) + +### Bug Fixes + +* **audio:** add fallback overload types ([0c00a13](https://github.com/openai/openai-node/commit/0c00a13dd864b974d3376c905647209e4a79f244)) +* **audio:** use export type ([1519100](https://github.com/openai/openai-node/commit/1519100e530e08e7683549d0bcdd919b9c2d1654)) + ## 4.66.0 (2024-09-27) Full Changelog: [v4.65.0...v4.66.0](https://github.com/openai/openai-node/compare/v4.65.0...v4.66.0) diff --git a/README.md b/README.md index 1058dce4c..a0132bb63 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.66.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.66.1/mod.ts'; ``` diff --git a/package.json b/package.json index b8cdcb658..038d377ea 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.66.0", + "version": "4.66.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 04f414a32..7d0c4219d 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.66.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.66.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 3da94762b..ce3023d68 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.66.0'; // x-release-please-version +export const VERSION = '4.66.1'; // x-release-please-version From 
96225e167b2d29be4f5a9a266a7391c0e8e04cef Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 1 Oct 2024 17:52:44 +0000 Subject: [PATCH 280/533] feat(api): support storing chat completions, enabling evals and model distillation in the dashboard (#1112) Learn more at http://openai.com/devday2024 --- src/resources/chat/chat.ts | 1 + src/resources/chat/completions.ts | 20 ++++++++++++++-- src/resources/completions.ts | 25 ++++++++++++++++++++ tests/api-resources/chat/completions.test.ts | 2 ++ 4 files changed, 46 insertions(+), 2 deletions(-) diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 1a758fbb5..5bc7de955 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -16,6 +16,7 @@ export type ChatModel = | 'gpt-4o' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' + | 'gpt-4o-realtime-preview-2024-10-01' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index f426ce36f..27aebdc4c 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -727,8 +727,12 @@ export type ChatCompletionCreateParams = export interface ChatCompletionCreateParamsBase { /** - * A list of messages comprising the conversation so far. - * [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + * A list of messages comprising the conversation so far. Depending on the + * [model](https://platform.openai.com/docs/models) you use, different message + * types (modalities) are supported, like + * [text](https://platform.openai.com/docs/guides/text-generation), + * [images](https://platform.openai.com/docs/guides/vision), and + * [audio](https://platform.openai.com/docs/guides/audio). 
*/ messages: Array; @@ -806,6 +810,12 @@ export interface ChatCompletionCreateParamsBase { */ max_tokens?: number | null; + /** + * Developer-defined tags and values used for filtering completions in the + * [dashboard](https://platform.openai.com/completions). + */ + metadata?: Record | null; + /** * How many chat completion choices to generate for each input message. Note that * you will be charged based on the number of generated tokens across all of the @@ -889,6 +899,12 @@ export interface ChatCompletionCreateParamsBase { */ stop?: string | null | Array; + /** + * Whether or not to store the output of this completion request for traffic + * logging in the [dashboard](https://platform.openai.com/completions). + */ + store?: boolean | null; + /** * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be * sent as data-only diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 152496766..7acd5d13f 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -125,6 +125,11 @@ export interface CompletionUsage { * Breakdown of tokens used in a completion. */ completion_tokens_details?: CompletionUsage.CompletionTokensDetails; + + /** + * Breakdown of tokens used in the prompt. + */ + prompt_tokens_details?: CompletionUsage.PromptTokensDetails; } export namespace CompletionUsage { @@ -132,11 +137,31 @@ export namespace CompletionUsage { * Breakdown of tokens used in a completion. */ export interface CompletionTokensDetails { + /** + * Audio input tokens generated by the model. + */ + audio_tokens?: number; + /** * Tokens generated by the model for reasoning. */ reasoning_tokens?: number; } + + /** + * Breakdown of tokens used in the prompt. + */ + export interface PromptTokensDetails { + /** + * Audio input tokens present in the prompt. + */ + audio_tokens?: number; + + /** + * Cached tokens present in the prompt. 
+ */ + cached_tokens?: number; + } } export type CompletionCreateParams = CompletionCreateParamsNonStreaming | CompletionCreateParamsStreaming; diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 692b953f2..4f015b47e 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -34,6 +34,7 @@ describe('resource completions', () => { logprobs: true, max_completion_tokens: 0, max_tokens: 0, + metadata: { foo: 'string' }, n: 1, parallel_tool_calls: true, presence_penalty: -2, @@ -41,6 +42,7 @@ describe('resource completions', () => { seed: -9007199254740991, service_tier: 'auto', stop: 'string', + store: true, stream: false, stream_options: { include_usage: true }, temperature: 1, From e53eb194c9a79064e303575fb92b3ee5ddd77268 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 17:53:05 +0000 Subject: [PATCH 281/533] release: 4.67.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 933d4022c..6872cea50 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.66.1" + ".": "4.67.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e927252ce..1c8960f56 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.67.0 (2024-10-01) + +Full Changelog: [v4.66.1...v4.67.0](https://github.com/openai/openai-node/compare/v4.66.1...v4.67.0) + +### Features + +* **api:** support storing chat completions, enabling evals and model distillation in the dashboard ([#1112](https://github.com/openai/openai-node/issues/1112)) 
([6424924](https://github.com/openai/openai-node/commit/6424924b6361e54f07c04fce9075ab16fcb712fb)) + ## 4.66.1 (2024-09-30) Full Changelog: [v4.66.0...v4.66.1](https://github.com/openai/openai-node/compare/v4.66.0...v4.66.1) diff --git a/README.md b/README.md index a0132bb63..4eaa2a8f7 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.66.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.67.0/mod.ts'; ``` diff --git a/package.json b/package.json index 038d377ea..eb6aa2fa0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.66.1", + "version": "4.67.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 7d0c4219d..91d392205 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.66.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.67.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index ce3023d68..fe2f611c2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.66.1'; // x-release-please-version +export const VERSION = '4.67.0'; // x-release-please-version From 6ff5cd9ea2d5267d4784f77e0be71efa8bcc5591 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 19:35:05 +0000 Subject: [PATCH 282/533] docs: improve and reference contributing documentation (#1115) --- CONTRIBUTING.md | 46 +++++++++++++++++++++++----------------------- README.md | 4 ++++ 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/CONTRIBUTING.md 
b/CONTRIBUTING.md index 62b48d828..e8bbc1b07 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,9 +5,9 @@ Other package managers may work but are not officially supported for development To set up the repository, run: -```bash -yarn -yarn build +```sh +$ yarn +$ yarn build ``` This will install all the required dependencies and build output files to `dist/`. @@ -22,7 +22,7 @@ modify the contents of the `src/lib/` and `examples/` directories. All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. -```bash +```ts // add an example to examples/.ts #!/usr/bin/env -S npm run tsn -T @@ -41,38 +41,38 @@ If you’d like to use the repository from source, you can either install from g To install via git: -```bash -npm install git+ssh://git@github.com:openai/openai-node.git +```sh +$ npm install git+ssh://git@github.com:openai/openai-node.git ``` Alternatively, to link a local copy of the repo: -```bash +```sh # Clone -git clone https://www.github.com/openai/openai-node -cd openai-node +$ git clone https://www.github.com/openai/openai-node +$ cd openai-node # With yarn -yarn link -cd ../my-package -yarn link openai +$ yarn link +$ cd ../my-package +$ yarn link openai # With pnpm -pnpm link --global -cd ../my-package -pnpm link -—global openai +$ pnpm link --global +$ cd ../my-package +$ pnpm link -—global openai ``` ## Running tests Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. 
-```bash -npx prism mock path/to/your/openapi.yml +```sh +$ npx prism mock path/to/your/openapi.yml ``` -```bash -yarn run test +```sh +$ yarn run test ``` ## Linting and formatting @@ -82,14 +82,14 @@ This repository uses [prettier](https://www.npmjs.com/package/prettier) and To lint: -```bash -yarn lint +```sh +$ yarn lint ``` To format and fix all lint issues automatically: -```bash -yarn fix +```sh +$ yarn fix ``` ## Publishing and releases diff --git a/README.md b/README.md index 4eaa2a8f7..c5fc3273b 100644 --- a/README.md +++ b/README.md @@ -650,3 +650,7 @@ The following runtimes are supported: Note that React Native is not supported at this time. If you are interested in other runtime environments, please open or upvote an issue on GitHub. + +## Contributing + +See [the contributing documentation](./CONTRIBUTING.md). From 785ef4b579637d3a6a205c0a9c51aa71987bf30e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 05:07:04 +0000 Subject: [PATCH 283/533] release: 4.67.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6872cea50..913154f97 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.67.0" + ".": "4.67.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c8960f56..b866aeff2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.67.1 (2024-10-02) + +Full Changelog: [v4.67.0...v4.67.1](https://github.com/openai/openai-node/compare/v4.67.0...v4.67.1) + +### Documentation + +* improve and reference contributing documentation ([#1115](https://github.com/openai/openai-node/issues/1115)) 
([7fa30b3](https://github.com/openai/openai-node/commit/7fa30b3ebf276556141df95ba8e824a0276b61f8)) + ## 4.67.0 (2024-10-01) Full Changelog: [v4.66.1...v4.67.0](https://github.com/openai/openai-node/compare/v4.66.1...v4.67.0) diff --git a/README.md b/README.md index c5fc3273b..058f412d5 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.67.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.67.1/mod.ts'; ``` diff --git a/package.json b/package.json index eb6aa2fa0..b04cf8a6a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.67.0", + "version": "4.67.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 91d392205..a717ae498 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.67.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.67.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index fe2f611c2..b60359315 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.67.0'; // x-release-please-version +export const VERSION = '4.67.1'; // x-release-please-version From f7c96748caccf79aa60c63cb94f5a8582f45e371 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 18:11:15 +0000 Subject: [PATCH 284/533] chore(internal): move LineDecoder to a separate file (#1120) --- src/internal/decoders/line.ts | 114 ++++++++++++++++++++++++++++++++++ src/streaming.ts | 112 +-------------------------------- 2 files changed, 115 insertions(+), 111 deletions(-) 
create mode 100644 src/internal/decoders/line.ts diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts new file mode 100644 index 000000000..1e0bbf390 --- /dev/null +++ b/src/internal/decoders/line.ts @@ -0,0 +1,114 @@ +import { OpenAIError } from '../../error'; + +type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; + +/** + * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally + * reading lines from text. + * + * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258 + */ +export class LineDecoder { + // prettier-ignore + static NEWLINE_CHARS = new Set(['\n', '\r']); + static NEWLINE_REGEXP = /\r\n|[\n\r]/g; + + buffer: string[]; + trailingCR: boolean; + textDecoder: any; // TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types. + + constructor() { + this.buffer = []; + this.trailingCR = false; + } + + decode(chunk: Bytes): string[] { + let text = this.decodeText(chunk); + + if (this.trailingCR) { + text = '\r' + text; + this.trailingCR = false; + } + if (text.endsWith('\r')) { + this.trailingCR = true; + text = text.slice(0, -1); + } + + if (!text) { + return []; + } + + const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || ''); + let lines = text.split(LineDecoder.NEWLINE_REGEXP); + + // if there is a trailing new line then the last entry will be an empty + // string which we don't care about + if (trailingNewline) { + lines.pop(); + } + + if (lines.length === 1 && !trailingNewline) { + this.buffer.push(lines[0]!); + return []; + } + + if (this.buffer.length > 0) { + lines = [this.buffer.join('') + lines[0], ...lines.slice(1)]; + this.buffer = []; + } + + if (!trailingNewline) { + this.buffer = [lines.pop() || '']; + } + + return lines; + } + + decodeText(bytes: Bytes): string { + if (bytes == null) return ''; + if (typeof bytes === 'string') return bytes; + + // Node: + if (typeof 
Buffer !== 'undefined') { + if (bytes instanceof Buffer) { + return bytes.toString(); + } + if (bytes instanceof Uint8Array) { + return Buffer.from(bytes).toString(); + } + + throw new OpenAIError( + `Unexpected: received non-Uint8Array (${bytes.constructor.name}) stream chunk in an environment with a global "Buffer" defined, which this library assumes to be Node. Please report this error.`, + ); + } + + // Browser + if (typeof TextDecoder !== 'undefined') { + if (bytes instanceof Uint8Array || bytes instanceof ArrayBuffer) { + this.textDecoder ??= new TextDecoder('utf8'); + return this.textDecoder.decode(bytes); + } + + throw new OpenAIError( + `Unexpected: received non-Uint8Array/ArrayBuffer (${ + (bytes as any).constructor.name + }) in a web platform. Please report this error.`, + ); + } + + throw new OpenAIError( + `Unexpected: neither Buffer nor TextDecoder are available as globals. Please report this error.`, + ); + } + + flush(): string[] { + if (!this.buffer.length && !this.trailingCR) { + return []; + } + + const lines = [this.buffer.join('')]; + this.buffer = []; + this.trailingCR = false; + return lines; + } +} diff --git a/src/streaming.ts b/src/streaming.ts index 722a8f69c..597ee89fa 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -1,5 +1,6 @@ import { ReadableStream, type Response } from './_shims/index'; import { OpenAIError } from './error'; +import { LineDecoder } from './internal/decoders/line'; import { APIError } from 'openai/error'; @@ -343,117 +344,6 @@ class SSEDecoder { } } -/** - * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally - * reading lines from text. 
- * - * https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258 - */ -class LineDecoder { - // prettier-ignore - static NEWLINE_CHARS = new Set(['\n', '\r']); - static NEWLINE_REGEXP = /\r\n|[\n\r]/g; - - buffer: string[]; - trailingCR: boolean; - textDecoder: any; // TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types. - - constructor() { - this.buffer = []; - this.trailingCR = false; - } - - decode(chunk: Bytes): string[] { - let text = this.decodeText(chunk); - - if (this.trailingCR) { - text = '\r' + text; - this.trailingCR = false; - } - if (text.endsWith('\r')) { - this.trailingCR = true; - text = text.slice(0, -1); - } - - if (!text) { - return []; - } - - const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || ''); - let lines = text.split(LineDecoder.NEWLINE_REGEXP); - - // if there is a trailing new line then the last entry will be an empty - // string which we don't care about - if (trailingNewline) { - lines.pop(); - } - - if (lines.length === 1 && !trailingNewline) { - this.buffer.push(lines[0]!); - return []; - } - - if (this.buffer.length > 0) { - lines = [this.buffer.join('') + lines[0], ...lines.slice(1)]; - this.buffer = []; - } - - if (!trailingNewline) { - this.buffer = [lines.pop() || '']; - } - - return lines; - } - - decodeText(bytes: Bytes): string { - if (bytes == null) return ''; - if (typeof bytes === 'string') return bytes; - - // Node: - if (typeof Buffer !== 'undefined') { - if (bytes instanceof Buffer) { - return bytes.toString(); - } - if (bytes instanceof Uint8Array) { - return Buffer.from(bytes).toString(); - } - - throw new OpenAIError( - `Unexpected: received non-Uint8Array (${bytes.constructor.name}) stream chunk in an environment with a global "Buffer" defined, which this library assumes to be Node. 
Please report this error.`, - ); - } - - // Browser - if (typeof TextDecoder !== 'undefined') { - if (bytes instanceof Uint8Array || bytes instanceof ArrayBuffer) { - this.textDecoder ??= new TextDecoder('utf8'); - return this.textDecoder.decode(bytes); - } - - throw new OpenAIError( - `Unexpected: received non-Uint8Array/ArrayBuffer (${ - (bytes as any).constructor.name - }) in a web platform. Please report this error.`, - ); - } - - throw new OpenAIError( - `Unexpected: neither Buffer nor TextDecoder are available as globals. Please report this error.`, - ); - } - - flush(): string[] { - if (!this.buffer.length && !this.trailingCR) { - return []; - } - - const lines = [this.buffer.join('')]; - this.buffer = []; - this.trailingCR = false; - return lines; - } -} - /** This is an internal helper function that's just used for testing */ export function _decodeChunks(chunks: string[]): string[] { const decoder = new LineDecoder(); From 7f8b872f68df6a77d0c27d858c18bccae939cdc6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 05:07:29 +0000 Subject: [PATCH 285/533] release: 4.67.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 913154f97..f45113292 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.67.1" + ".": "4.67.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b866aeff2..72e61a413 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.67.2 (2024-10-07) + +Full Changelog: [v4.67.1...v4.67.2](https://github.com/openai/openai-node/compare/v4.67.1...v4.67.2) + +### Chores + +* **internal:** move LineDecoder to a separate file 
([#1120](https://github.com/openai/openai-node/issues/1120)) ([0a4be65](https://github.com/openai/openai-node/commit/0a4be6506bf26d2b9552ff3fd13a22c04b53ea18)) + ## 4.67.1 (2024-10-02) Full Changelog: [v4.67.0...v4.67.1](https://github.com/openai/openai-node/compare/v4.67.0...v4.67.1) diff --git a/README.md b/README.md index 058f412d5..11e1455e2 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.67.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.67.2/mod.ts'; ``` diff --git a/package.json b/package.json index b04cf8a6a..79b73d5cb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.67.1", + "version": "4.67.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index a717ae498..46c3f3db3 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.67.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.67.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index b60359315..fb7e251f7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.67.1'; // x-release-please-version +export const VERSION = '4.67.2'; // x-release-please-version From 6cdc77de20f27cbc9e0a3bac52f2308e4216ad76 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 8 Oct 2024 13:42:35 +0000 Subject: [PATCH 286/533] chore(internal): pass props through internal parser (#1125) --- src/core.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core.ts b/src/core.ts index c104b61d1..d78e9e926 100644 --- a/src/core.ts +++ b/src/core.ts @@ -116,9 
+116,9 @@ export class APIPromise extends Promise> { }); } - _thenUnwrap(transform: (data: T) => U): APIPromise { + _thenUnwrap(transform: (data: T, props: APIResponseProps) => U): APIPromise { return new APIPromise(this.responsePromise, async (props) => - _addRequestID(transform(await this.parseResponse(props)), props.response), + _addRequestID(transform(await this.parseResponse(props), props), props.response), ); } From 3c326626c1035d43ee184cedc9084a7d399331df Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 13:42:59 +0000 Subject: [PATCH 287/533] release: 4.67.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f45113292..e8c54ecee 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.67.2" + ".": "4.67.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 72e61a413..710d09ca9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.67.3 (2024-10-08) + +Full Changelog: [v4.67.2...v4.67.3](https://github.com/openai/openai-node/compare/v4.67.2...v4.67.3) + +### Chores + +* **internal:** pass props through internal parser ([#1125](https://github.com/openai/openai-node/issues/1125)) ([5ef8aa8](https://github.com/openai/openai-node/commit/5ef8aa8d308f7374dd01d8079cd76e0d96999ec2)) + ## 4.67.2 (2024-10-07) Full Changelog: [v4.67.1...v4.67.2](https://github.com/openai/openai-node/compare/v4.67.1...v4.67.2) diff --git a/README.md b/README.md index 11e1455e2..407933634 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.67.2/mod.ts'; +import OpenAI from 
'https://deno.land/x/openai@v4.67.3/mod.ts'; ``` diff --git a/package.json b/package.json index 79b73d5cb..e20c1b9c1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.67.2", + "version": "4.67.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 46c3f3db3..f59404dbc 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.67.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.67.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index fb7e251f7..174c31111 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.67.2'; // x-release-please-version +export const VERSION = '4.67.3'; // x-release-please-version From 16e094b5e50e4b61e5d1f6519282a12672fc71a3 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Thu, 17 Oct 2024 16:52:53 +0000 Subject: [PATCH 288/533] feat(api): add gpt-4o-audio-preview model for chat completions (#1135) This enables audio inputs and outputs. 
https://platform.openai.com/docs/guides/audio --- .stats.yml | 2 +- api.md | 4 + src/index.ts | 4 + src/lib/AbstractChatCompletionRunner.ts | 4 +- src/resources/beta/assistants.ts | 10 ++ src/resources/chat/chat.ts | 7 + src/resources/chat/completions.ts | 153 ++++++++++++++++++- src/resources/chat/index.ts | 4 + tests/api-resources/chat/completions.test.ts | 2 + 9 files changed, 183 insertions(+), 7 deletions(-) diff --git a/.stats.yml b/.stats.yml index 68789976b..984e8a8d5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-71e58a77027c67e003fdd1b1ac8ac11557d8bfabc7666d1a827c6b1ca8ab98b5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8729aaa35436531ab453224af10e67f89677db8f350f0346bb3537489edea649.yml diff --git a/api.md b/api.md index 71027acfd..da60f65bd 100644 --- a/api.md +++ b/api.md @@ -33,9 +33,12 @@ Types: - ChatCompletion - ChatCompletionAssistantMessageParam +- ChatCompletionAudio +- ChatCompletionAudioParam - ChatCompletionChunk - ChatCompletionContentPart - ChatCompletionContentPartImage +- ChatCompletionContentPartInputAudio - ChatCompletionContentPartRefusal - ChatCompletionContentPartText - ChatCompletionFunctionCallOption @@ -43,6 +46,7 @@ Types: - ChatCompletionMessage - ChatCompletionMessageParam - ChatCompletionMessageToolCall +- ChatCompletionModality - ChatCompletionNamedToolChoice - ChatCompletionRole - ChatCompletionStreamOptions diff --git a/src/index.ts b/src/index.ts index d3e1d2a78..56108223a 100644 --- a/src/index.ts +++ b/src/index.ts @@ -250,9 +250,12 @@ export namespace OpenAI { export import ChatModel = API.ChatModel; export import ChatCompletion = API.ChatCompletion; export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; + export import ChatCompletionAudio = API.ChatCompletionAudio; + export import ChatCompletionAudioParam = 
API.ChatCompletionAudioParam; export import ChatCompletionChunk = API.ChatCompletionChunk; export import ChatCompletionContentPart = API.ChatCompletionContentPart; export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; + export import ChatCompletionContentPartInputAudio = API.ChatCompletionContentPartInputAudio; export import ChatCompletionContentPartRefusal = API.ChatCompletionContentPartRefusal; export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; @@ -260,6 +263,7 @@ export namespace OpenAI { export import ChatCompletionMessage = API.ChatCompletionMessage; export import ChatCompletionMessageParam = API.ChatCompletionMessageParam; export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; + export import ChatCompletionModality = API.ChatCompletionModality; export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; export import ChatCompletionRole = API.ChatCompletionRole; export import ChatCompletionStreamOptions = API.ChatCompletionStreamOptions; diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index 39ee4e993..e943a4e4f 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -105,7 +105,9 @@ export class AbstractChatCompletionRunner< const message = this.messages[i]; if (isAssistantMessage(message)) { const { function_call, ...rest } = message; - const ret: ChatCompletionMessage = { + + // TODO: support audio here + const ret: Omit = { ...rest, content: (message as ChatCompletionMessage).content ?? null, refusal: (message as ChatCompletionMessage).refusal ?? 
null, diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 410d520b0..aa7362297 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -298,6 +298,11 @@ export namespace AssistantStreamEvent { data: ThreadsAPI.Thread; event: 'thread.created'; + + /** + * Whether to enable input audio transcription. + */ + enabled?: boolean; } /** @@ -1084,6 +1089,11 @@ export interface ThreadStreamEvent { data: ThreadsAPI.Thread; event: 'thread.created'; + + /** + * Whether to enable input audio transcription. + */ + enabled?: boolean; } export interface AssistantCreateParams { diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 5bc7de955..43ef5662c 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -16,7 +16,10 @@ export type ChatModel = | 'gpt-4o' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' + | 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-audio-preview' + | 'gpt-4o-audio-preview-2024-10-01' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' @@ -45,9 +48,12 @@ export namespace Chat { export import Completions = CompletionsAPI.Completions; export import ChatCompletion = CompletionsAPI.ChatCompletion; export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam; + export import ChatCompletionAudio = CompletionsAPI.ChatCompletionAudio; + export import ChatCompletionAudioParam = CompletionsAPI.ChatCompletionAudioParam; export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk; export import ChatCompletionContentPart = CompletionsAPI.ChatCompletionContentPart; export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage; + export import ChatCompletionContentPartInputAudio = CompletionsAPI.ChatCompletionContentPartInputAudio; export import ChatCompletionContentPartRefusal = CompletionsAPI.ChatCompletionContentPartRefusal; export import 
ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption; @@ -55,6 +61,7 @@ export namespace Chat { export import ChatCompletionMessage = CompletionsAPI.ChatCompletionMessage; export import ChatCompletionMessageParam = CompletionsAPI.ChatCompletionMessageParam; export import ChatCompletionMessageToolCall = CompletionsAPI.ChatCompletionMessageToolCall; + export import ChatCompletionModality = CompletionsAPI.ChatCompletionModality; export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice; export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole; export import ChatCompletionStreamOptions = CompletionsAPI.ChatCompletionStreamOptions; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 27aebdc4c..97174ec1b 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -11,7 +11,10 @@ import { Stream } from '../../streaming'; export class Completions extends APIResource { /** - * Creates a model response for the given chat conversation. + * Creates a model response for the given chat conversation. Learn more in the + * [text generation](https://platform.openai.com/docs/guides/text-generation), + * [vision](https://platform.openai.com/docs/guides/vision), and + * [audio](https://platform.openai.com/docs/guides/audio) guides. */ create( body: ChatCompletionCreateParamsNonStreaming, @@ -138,6 +141,12 @@ export interface ChatCompletionAssistantMessageParam { */ role: 'assistant'; + /** + * Data about a previous audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + audio?: ChatCompletionAssistantMessageParam.Audio | null; + /** * The contents of the assistant message. Required unless `tool_calls` or * `function_call` is specified. 
@@ -168,6 +177,17 @@ export interface ChatCompletionAssistantMessageParam { } export namespace ChatCompletionAssistantMessageParam { + /** + * Data about a previous audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + export interface Audio { + /** + * Unique identifier for a previous audio response from the model. + */ + id: string; + } + /** * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of * a function that should be called, as generated by the model. @@ -188,6 +208,54 @@ export namespace ChatCompletionAssistantMessageParam { } } +/** + * If the audio output modality is requested, this object contains data about the + * audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ +export interface ChatCompletionAudio { + /** + * Unique identifier for this audio response. + */ + id: string; + + /** + * Base64 encoded audio bytes generated by the model, in the format specified in + * the request. + */ + data: string; + + /** + * The Unix timestamp (in seconds) for when this audio response will no longer be + * accessible on the server for use in multi-turn conversations. + */ + expires_at: number; + + /** + * Transcript of the audio generated by the model. + */ + transcript: string; +} + +/** + * Parameters for audio output. Required when audio output is requested with + * `modalities: ["audio"]`. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ +export interface ChatCompletionAudioParam { + /** + * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, + * or `pcm16`. + */ + format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; + + /** + * Specifies the voice type. Supported voices are `alloy`, `echo`, `fable`, `onyx`, + * `nova`, and `shimmer`. 
+ */ + voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; +} + /** * Represents a streamed chunk of a chat completion response returned by model, * based on the provided input. @@ -371,8 +439,18 @@ export namespace ChatCompletionChunk { } } -export type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage; +/** + * Learn about + * [text inputs](https://platform.openai.com/docs/guides/text-generation). + */ +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionContentPartInputAudio; +/** + * Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + */ export interface ChatCompletionContentPartImage { image_url: ChatCompletionContentPartImage.ImageURL; @@ -397,6 +475,32 @@ export namespace ChatCompletionContentPartImage { } } +/** + * Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). + */ +export interface ChatCompletionContentPartInputAudio { + input_audio: ChatCompletionContentPartInputAudio.InputAudio; + + /** + * The type of the content part. Always `input_audio`. + */ + type: 'input_audio'; +} + +export namespace ChatCompletionContentPartInputAudio { + export interface InputAudio { + /** + * Base64 encoded audio data. + */ + data: string; + + /** + * The format of the encoded audio data. Currently supports "wav" and "mp3". + */ + format: 'wav' | 'mp3'; + } +} + export interface ChatCompletionContentPartRefusal { /** * The refusal message generated by the model. @@ -409,6 +513,10 @@ export interface ChatCompletionContentPartRefusal { type: 'refusal'; } +/** + * Learn about + * [text inputs](https://platform.openai.com/docs/guides/text-generation). + */ export interface ChatCompletionContentPartText { /** * The text content. 
@@ -471,6 +579,13 @@ export interface ChatCompletionMessage { */ role: 'assistant'; + /** + * If the audio output modality is requested, this object contains data about the + * audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + audio?: ChatCompletionAudio | null; + /** * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of * a function that should be called, as generated by the model. @@ -548,6 +663,8 @@ export namespace ChatCompletionMessageToolCall { } } +export type ChatCompletionModality = 'text' | 'audio'; + /** * Specifies a tool the model should use. Use to force the model to call a specific * function. @@ -743,6 +860,13 @@ export interface ChatCompletionCreateParamsBase { */ model: (string & {}) | ChatAPI.ChatModel; + /** + * Parameters for audio output. Required when audio output is requested with + * `modalities: ["audio"]`. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + audio?: ChatCompletionAudioParam | null; + /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their * existing frequency in the text so far, decreasing the model's likelihood to @@ -812,10 +936,24 @@ export interface ChatCompletionCreateParamsBase { /** * Developer-defined tags and values used for filtering completions in the - * [dashboard](https://platform.openai.com/completions). + * [dashboard](https://platform.openai.com/chat-completions). */ metadata?: Record | null; + /** + * Output types that you would like the model to generate for this request. Most + * models are capable of generating text, which is the default: + * + * `["text"]` + * + * The `gpt-4o-audio-preview` model can also be used to + * [generate audio](https://platform.openai.com/docs/guides/audio). 
To request that + * this model generate both text and audio responses, you can use: + * + * `["text", "audio"]` + */ + modalities?: Array | null; + /** * How many chat completion choices to generate for each input message. Note that * you will be charged based on the number of generated tokens across all of the @@ -900,8 +1038,9 @@ export interface ChatCompletionCreateParamsBase { stop?: string | null | Array; /** - * Whether or not to store the output of this completion request for traffic - * logging in the [dashboard](https://platform.openai.com/completions). + * Whether or not to store the output of this chat completion request for use in + * our [model distillation](https://platform.openai.com/docs/guides/distillation) + * or [evals](https://platform.openai.com/docs/guides/evals) products. */ store?: boolean | null; @@ -1049,9 +1188,12 @@ export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreamin export namespace Completions { export import ChatCompletion = ChatCompletionsAPI.ChatCompletion; export import ChatCompletionAssistantMessageParam = ChatCompletionsAPI.ChatCompletionAssistantMessageParam; + export import ChatCompletionAudio = ChatCompletionsAPI.ChatCompletionAudio; + export import ChatCompletionAudioParam = ChatCompletionsAPI.ChatCompletionAudioParam; export import ChatCompletionChunk = ChatCompletionsAPI.ChatCompletionChunk; export import ChatCompletionContentPart = ChatCompletionsAPI.ChatCompletionContentPart; export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage; + export import ChatCompletionContentPartInputAudio = ChatCompletionsAPI.ChatCompletionContentPartInputAudio; export import ChatCompletionContentPartRefusal = ChatCompletionsAPI.ChatCompletionContentPartRefusal; export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText; export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption; @@ -1059,6 +1201,7 
@@ export namespace Completions { export import ChatCompletionMessage = ChatCompletionsAPI.ChatCompletionMessage; export import ChatCompletionMessageParam = ChatCompletionsAPI.ChatCompletionMessageParam; export import ChatCompletionMessageToolCall = ChatCompletionsAPI.ChatCompletionMessageToolCall; + export import ChatCompletionModality = ChatCompletionsAPI.ChatCompletionModality; export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice; export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole; export import ChatCompletionStreamOptions = ChatCompletionsAPI.ChatCompletionStreamOptions; diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 748770948..22803e819 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -3,9 +3,12 @@ export { ChatCompletion, ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, ChatCompletionContentPartRefusal, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, @@ -13,6 +16,7 @@ export { ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, + ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionRole, ChatCompletionStreamOptions, diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 4f015b47e..77d4a251c 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -27,6 +27,7 @@ describe('resource completions', () => { const response = await client.chat.completions.create({ messages: [{ content: 'string', role: 'system', name: 'name' }], model: 'gpt-4o', + audio: { format: 'wav', voice: 'alloy' }, frequency_penalty: -2, function_call: 'none', functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], @@ 
-35,6 +36,7 @@ describe('resource completions', () => { max_completion_tokens: 0, max_tokens: 0, metadata: { foo: 'string' }, + modalities: ['text', 'audio'], n: 1, parallel_tool_calls: true, presence_penalty: -2, From 6ae19ce08eea4f6b3c4865861a6cce09d403cac8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 17:19:37 +0000 Subject: [PATCH 289/533] release: 4.68.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e8c54ecee..91b39801d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.67.3" + ".": "4.68.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 710d09ca9..2fcd3be4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.0 (2024-10-17) + +Full Changelog: [v4.67.3...v4.68.0](https://github.com/openai/openai-node/compare/v4.67.3...v4.68.0) + +### Features + +* **api:** add gpt-4o-audio-preview model for chat completions ([#1135](https://github.com/openai/openai-node/issues/1135)) ([17a623f](https://github.com/openai/openai-node/commit/17a623f70050bca4538ad2939055cd9d9b165f89)) + ## 4.67.3 (2024-10-08) Full Changelog: [v4.67.2...v4.67.3](https://github.com/openai/openai-node/compare/v4.67.2...v4.67.3) diff --git a/README.md b/README.md index 407933634..bbfc821d2 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.67.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.0/mod.ts'; ``` diff --git a/package.json b/package.json index e20c1b9c1..807c79098 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.67.3", + 
"version": "4.68.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index f59404dbc..5e813aeb2 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.67.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 174c31111..12aaa52bb 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.67.3'; // x-release-please-version +export const VERSION = '4.68.0'; // x-release-please-version From 02fd7699130e2ff442aca45622b064bc4eda6fab Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Oct 2024 18:12:40 +0000 Subject: [PATCH 290/533] fix(client): respect x-stainless-retry-count default headers (#1138) --- src/core.ts | 10 +++++++--- tests/index.test.ts | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/src/core.ts b/src/core.ts index d78e9e926..9d90178ab 100644 --- a/src/core.ts +++ b/src/core.ts @@ -386,9 +386,13 @@ export abstract class APIClient { delete reqHeaders['content-type']; } - // Don't set the retry count header if it was already set or removed by the caller. We check `headers`, - // which can contain nulls, instead of `reqHeaders` to account for the removal case. - if (getHeader(headers, 'x-stainless-retry-count') === undefined) { + // Don't set the retry count header if it was already set or removed through default headers or by the + // caller. We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to + // account for the removal case. 
+ if ( + getHeader(defaultHeaders, 'x-stainless-retry-count') === undefined && + getHeader(headers, 'x-stainless-retry-count') === undefined + ) { reqHeaders['x-stainless-retry-count'] = String(retryCount); } diff --git a/tests/index.test.ts b/tests/index.test.ts index b55ec5f67..f39571121 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -295,6 +295,39 @@ describe('retries', () => { expect(capturedRequest!.headers as Headers).not.toHaveProperty('x-stainless-retry-count'); }); + test('omit retry count header by default', async () => { + let count = 0; + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + count++; + if (count <= 2) { + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After': '0.1', + }, + }); + } + capturedRequest = init; + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + const client = new OpenAI({ + apiKey: 'My API Key', + fetch: testFetch, + maxRetries: 4, + defaultHeaders: { 'X-Stainless-Retry-Count': null }, + }); + + expect( + await client.request({ + path: '/foo', + method: 'get', + }), + ).toEqual({ a: 1 }); + + expect(capturedRequest!.headers as Headers).not.toHaveProperty('x-stainless-retry-count'); + }); + test('overwrite retry count header', async () => { let count = 0; let capturedRequest: RequestInit | undefined; From d08bf1a8fa779e6a9349d92ddf65530dd84e686d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Oct 2024 18:13:06 +0000 Subject: [PATCH 291/533] release: 4.68.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 91b39801d..64f1d21d4 100644 --- 
a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.0" + ".": "4.68.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fcd3be4d..9a2102f46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.1 (2024-10-18) + +Full Changelog: [v4.68.0...v4.68.1](https://github.com/openai/openai-node/compare/v4.68.0...v4.68.1) + +### Bug Fixes + +* **client:** respect x-stainless-retry-count default headers ([#1138](https://github.com/openai/openai-node/issues/1138)) ([266717b](https://github.com/openai/openai-node/commit/266717b3301828c7df735064a380a055576183bc)) + ## 4.68.0 (2024-10-17) Full Changelog: [v4.67.3...v4.68.0](https://github.com/openai/openai-node/compare/v4.67.3...v4.68.0) diff --git a/README.md b/README.md index bbfc821d2..d4b838897 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.0/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.1/mod.ts'; ``` diff --git a/package.json b/package.json index 807c79098..538163b0f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.0", + "version": "4.68.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 5e813aeb2..b7459b609 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.68.0/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.1/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 12aaa52bb..dcff7c8bd 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.68.0'; // 
x-release-please-version +export const VERSION = '4.68.1'; // x-release-please-version From 9b27b22f83756c91c9277ce8334da2120b6afe90 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 18:23:54 +0000 Subject: [PATCH 292/533] chore(internal): update spec version (#1141) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 984e8a8d5..e1a430e50 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8729aaa35436531ab453224af10e67f89677db8f350f0346bb3537489edea649.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-f9320ebf347140052c7f8b0bc5c7db24f5e367c368c8cb34c3606af4e2b6591b.yml From ab7770115e88ff1274cf8863afddd6b58f8f158f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 18:24:22 +0000 Subject: [PATCH 293/533] release: 4.68.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 64f1d21d4..7de9a93f1 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.1" + ".": "4.68.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a2102f46..93cf66d4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.2 (2024-10-22) + +Full Changelog: [v4.68.1...v4.68.2](https://github.com/openai/openai-node/compare/v4.68.1...v4.68.2) + +### Chores + +* **internal:** update spec version ([#1141](https://github.com/openai/openai-node/issues/1141)) 
([2ccb3e3](https://github.com/openai/openai-node/commit/2ccb3e357aa2f3eb0fa32c619d8336c3b94cc882)) + ## 4.68.1 (2024-10-18) Full Changelog: [v4.68.0...v4.68.1](https://github.com/openai/openai-node/compare/v4.68.0...v4.68.1) diff --git a/README.md b/README.md index d4b838897..5011b82a1 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.1/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.2/mod.ts'; ``` diff --git a/package.json b/package.json index 538163b0f..0eaebee91 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.1", + "version": "4.68.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index b7459b609..c2276e5ea 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.68.1/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.2/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index dcff7c8bd..bb7f3f7bd 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.68.1'; // x-release-please-version +export const VERSION = '4.68.2'; // x-release-please-version From 58a645d572572a3de2688a1fd8511f3edab97866 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 20:11:28 +0000 Subject: [PATCH 294/533] chore(internal): bumps eslint and related dependencies (#1143) --- yarn.lock | 180 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 92 insertions(+), 88 deletions(-) diff --git a/yarn.lock b/yarn.lock index 
5a01e39e3..91b22b941 100644 --- a/yarn.lock +++ b/yarn.lock @@ -322,9 +322,9 @@ eslint-visitor-keys "^3.3.0" "@eslint-community/regexpp@^4.5.1": - version "4.9.0" - resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.9.0.tgz#7ccb5f58703fa61ffdcbf39e2c604a109e781162" - integrity sha512-zJmuCWj2VLBt4c25CfBIbMZLGLyhkvs7LznyVX5HfpzeocThgIj5XQK4L+g3U36mMcx8bPMhGyPpwCATamC4jQ== + version "4.11.1" + resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.11.1.tgz#a547badfc719eb3e5f4b556325e542fbe9d7a18f" + integrity sha512-m4DVN9ZqskZoLU5GlWZadwDnYo3vAEydiUayB9widCl9ffWx2IvPnp6n3on5rJmziJSw9Bv+Z3ChDVdMwXCY8Q== "@eslint-community/regexpp@^4.6.1": version "4.6.2" @@ -857,9 +857,9 @@ pretty-format "^29.0.0" "@types/json-schema@^7.0.12": - version "7.0.13" - resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.13.tgz#02c24f4363176d2d18fc8b70b9f3c54aba178a85" - integrity sha512-RbSSoHliUbnXj3ny0CNFOoxrIDV6SUGyStHsvDqosw6CkdPV8TtWGlfecuK4ToyMEAql6pzNxgCFKanovUzlgQ== + version "7.0.15" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" + integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== "@types/node-fetch@^2.6.4": version "2.6.4" @@ -882,9 +882,9 @@ integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA== "@types/semver@^7.5.0": - version "7.5.3" - resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.3.tgz#9a726e116beb26c24f1ccd6850201e1246122e04" - integrity sha512-OxepLK9EuNEIPxWNME+C6WwbRAOOI2o2BaQEGzz5Lu2e4Z5eDnEo+/aVEDMIXywoJitJ7xWd641wrGLZdtwRyw== + version "7.5.8" + resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.8.tgz#8268a8c57a3e4abd25c165ecd36237db7948a55e" + integrity 
sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ== "@types/stack-utils@^2.0.0": version "2.0.3" @@ -904,15 +904,15 @@ "@types/yargs-parser" "*" "@typescript-eslint/eslint-plugin@^6.7.0": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.3.tgz#d98046e9f7102d49a93d944d413c6055c47fafd7" - integrity sha512-vntq452UHNltxsaaN+L9WyuMch8bMd9CqJ3zhzTPXXidwbf5mqqKCVXEuvRZUqLJSTLeWE65lQwyXsRGnXkCTA== + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz#30830c1ca81fd5f3c2714e524c4303e0194f9cd3" + integrity sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA== dependencies: "@eslint-community/regexpp" "^4.5.1" - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/type-utils" "6.7.3" - "@typescript-eslint/utils" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/type-utils" "6.21.0" + "@typescript-eslint/utils" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" graphemer "^1.4.0" ignore "^5.2.4" @@ -921,71 +921,72 @@ ts-api-utils "^1.0.1" "@typescript-eslint/parser@^6.7.0": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.7.3.tgz#aaf40092a32877439e5957e18f2d6a91c82cc2fd" - integrity sha512-TlutE+iep2o7R8Lf+yoer3zU6/0EAUc8QIBB3GYBc1KGz4c4TRm83xwXUZVPlZ6YCLss4r77jbu6j3sendJoiQ== - dependencies: - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/typescript-estree" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.21.0.tgz#af8fcf66feee2edc86bc5d1cf45e33b0630bf35b" + integrity 
sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ== + dependencies: + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" -"@typescript-eslint/scope-manager@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.7.3.tgz#07e5709c9bdae3eaf216947433ef97b3b8b7d755" - integrity sha512-wOlo0QnEou9cHO2TdkJmzF7DFGvAKEnB82PuPNHpT8ZKKaZu6Bm63ugOTn9fXNJtvuDPanBc78lGUGGytJoVzQ== +"@typescript-eslint/scope-manager@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz#ea8a9bfc8f1504a6ac5d59a6df308d3a0630a2b1" + integrity sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg== dependencies: - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" -"@typescript-eslint/type-utils@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.7.3.tgz#c2c165c135dda68a5e70074ade183f5ad68f3400" - integrity sha512-Fc68K0aTDrKIBvLnKTZ5Pf3MXK495YErrbHb1R6aTpfK5OdSFj0rVN7ib6Tx6ePrZ2gsjLqr0s98NG7l96KSQw== +"@typescript-eslint/type-utils@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz#6473281cfed4dacabe8004e8521cee0bd9d4c01e" + integrity sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag== dependencies: - "@typescript-eslint/typescript-estree" "6.7.3" - "@typescript-eslint/utils" "6.7.3" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/utils" "6.21.0" debug "^4.3.4" ts-api-utils "^1.0.1" 
-"@typescript-eslint/types@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.7.3.tgz#0402b5628a63f24f2dc9d4a678e9a92cc50ea3e9" - integrity sha512-4g+de6roB2NFcfkZb439tigpAMnvEIg3rIjWQ+EM7IBaYt/CdJt6em9BJ4h4UpdgaBWdmx2iWsafHTrqmgIPNw== +"@typescript-eslint/types@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.21.0.tgz#205724c5123a8fef7ecd195075fa6e85bac3436d" + integrity sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg== -"@typescript-eslint/typescript-estree@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.3.tgz#ec5bb7ab4d3566818abaf0e4a8fa1958561b7279" - integrity sha512-YLQ3tJoS4VxLFYHTw21oe1/vIZPRqAO91z6Uv0Ss2BKm/Ag7/RVQBcXTGcXhgJMdA4U+HrKuY5gWlJlvoaKZ5g== +"@typescript-eslint/typescript-estree@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz#c47ae7901db3b8bddc3ecd73daff2d0895688c46" + integrity sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ== dependencies: - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" globby "^11.1.0" is-glob "^4.0.3" + minimatch "9.0.3" semver "^7.5.4" ts-api-utils "^1.0.1" -"@typescript-eslint/utils@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.7.3.tgz#96c655816c373135b07282d67407cb577f62e143" - integrity sha512-vzLkVder21GpWRrmSR9JxGZ5+ibIUSudXlW52qeKpzUEQhRSmyZiVDDj3crAth7+5tmN1ulvgKaCU2f/bPRCzg== +"@typescript-eslint/utils@6.21.0": + version "6.21.0" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.21.0.tgz#4714e7a6b39e773c1c8e97ec587f520840cd8134" + integrity sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ== dependencies: "@eslint-community/eslint-utils" "^4.4.0" "@types/json-schema" "^7.0.12" "@types/semver" "^7.5.0" - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/typescript-estree" "6.7.3" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" semver "^7.5.4" -"@typescript-eslint/visitor-keys@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.3.tgz#83809631ca12909bd2083558d2f93f5747deebb2" - integrity sha512-HEVXkU9IB+nk9o63CeICMHxFWbHWr3E1mpilIQBe9+7L/lH97rleFLVtYsfnWB+JVMaiFnEaxvknvmIzX+CqVg== +"@typescript-eslint/visitor-keys@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz#87a99d077aa507e20e238b11d56cc26ade45fe47" + integrity sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A== dependencies: - "@typescript-eslint/types" "6.7.3" + "@typescript-eslint/types" "6.21.0" eslint-visitor-keys "^3.4.1" abort-controller@^3.0.0: @@ -1392,13 +1393,20 @@ cross-spawn@^7.0.2, cross-spawn@^7.0.3: shebang-command "^2.0.0" which "^2.0.1" -debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4: +debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== dependencies: ms "2.1.2" +debug@^4.3.4: + version "4.3.7" + resolved 
"https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" + integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== + dependencies: + ms "^2.1.3" + dedent@^1.0.0: version "1.5.1" resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff" @@ -1546,12 +1554,7 @@ eslint-scope@^7.2.2: esrecurse "^4.3.0" estraverse "^5.2.0" -eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1: - version "3.4.2" - resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.2.tgz#8c2095440eca8c933bedcadf16fefa44dbe9ba5f" - integrity sha512-8drBzUEyZ2llkpCA67iYrgEssKDUu68V8ChqqOfFupIaG/LCVPUT+CoGJpT77zJprs4T/W7p07LP7zAIMuweVw== - -eslint-visitor-keys@^3.4.3: +eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3: version "3.4.3" resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800" integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== @@ -1716,18 +1719,7 @@ fast-glob@^3.2.12: merge2 "^1.3.0" micromatch "^4.0.4" -fast-glob@^3.2.9: - version "3.3.1" - resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.1.tgz#784b4e897340f3dbbef17413b3f11acf03c874c4" - integrity sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg== - dependencies: - "@nodelib/fs.stat" "^2.0.2" - "@nodelib/fs.walk" "^1.2.3" - glob-parent "^5.1.2" - merge2 "^1.3.0" - micromatch "^4.0.4" - -fast-glob@^3.3.0: +fast-glob@^3.2.9, fast-glob@^3.3.0: version "3.3.2" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow== @@ -1749,9 +1741,9 @@ 
fast-levenshtein@^2.0.6: integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== fastq@^1.6.0: - version "1.15.0" - resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.15.0.tgz#d04d07c6a2a68fe4599fea8d2e103a937fae6b3a" - integrity sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw== + version "1.17.1" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47" + integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w== dependencies: reusify "^1.0.4" @@ -1974,9 +1966,9 @@ iconv-lite@^0.6.3: safer-buffer ">= 2.1.2 < 3.0.0" ignore@^5.2.0, ignore@^5.2.4: - version "5.2.4" - resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324" - integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ== + version "5.3.2" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" + integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== import-fresh@^3.2.1: version "3.3.0" @@ -2681,6 +2673,13 @@ mimic-fn@^4.0.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-4.0.0.tgz#60a90550d5cb0b239cca65d893b1a53b29871ecc" integrity sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw== +minimatch@9.0.3: + version "9.0.3" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825" + integrity sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg== + dependencies: + brace-expansion "^2.0.1" + minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: version "3.1.2" resolved 
"https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" @@ -2710,7 +2709,7 @@ ms@2.1.2: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@^2.0.0: +ms@^2.0.0, ms@^2.1.3: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== @@ -3075,13 +3074,18 @@ semver@^6.3.0, semver@^6.3.1: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.5.3, semver@^7.5.4: +semver@^7.5.3: version "7.5.4" resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e" integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA== dependencies: lru-cache "^6.0.0" +semver@^7.5.4: + version "7.6.3" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" + integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + shebang-command@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" @@ -3278,9 +3282,9 @@ tr46@~0.0.3: integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o= ts-api-utils@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.0.3.tgz#f12c1c781d04427313dbac808f453f050e54a331" - integrity 
sha512-wNMeqtMz5NtwpT/UZGY5alT+VoKdSsOOP/kqHFcUW1P/VRhH2wJ48+DN2WwUliNbQ976ETwDL0Ifd2VVvgonvg== + version "1.3.0" + resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.3.0.tgz#4b490e27129f1e8e686b45cc4ab63714dc60eea1" + integrity sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ== ts-jest@^29.1.0: version "29.1.1" From c239b5cf7723e825250a26cabefeb27aa398be23 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 05:07:12 +0000 Subject: [PATCH 295/533] release: 4.68.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7de9a93f1..eafafb2cf 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.2" + ".": "4.68.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 93cf66d4e..604e5183c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.3 (2024-10-23) + +Full Changelog: [v4.68.2...v4.68.3](https://github.com/openai/openai-node/compare/v4.68.2...v4.68.3) + +### Chores + +* **internal:** bumps eslint and related dependencies ([#1143](https://github.com/openai/openai-node/issues/1143)) ([2643f42](https://github.com/openai/openai-node/commit/2643f42a36208c36daf23470ffcd227a891284eb)) + ## 4.68.2 (2024-10-22) Full Changelog: [v4.68.1...v4.68.2](https://github.com/openai/openai-node/compare/v4.68.1...v4.68.2) diff --git a/README.md b/README.md index 5011b82a1..16d0450bb 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.2/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.3/mod.ts'; ``` diff --git 
a/package.json b/package.json index 0eaebee91..4fc07b525 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.2", + "version": "4.68.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index c2276e5ea..fa0fd26ea 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.68.2/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.3/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index bb7f3f7bd..2657a62ac 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.68.2'; // x-release-please-version +export const VERSION = '4.68.3'; // x-release-please-version From 748d77154f570b705c198ccd802a7dc0863690d5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:56:39 +0000 Subject: [PATCH 296/533] chore(internal): update spec version (#1146) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index e1a430e50..0b0872556 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-f9320ebf347140052c7f8b0bc5c7db24f5e367c368c8cb34c3606af4e2b6591b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b60d5559d5150ecd3b49136064e5e251d832899770ff385b711378389afba370.yml From 813cb4413d7b03bddf6885df7fd4c5928e2ec49f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:57:05 +0000 Subject: [PATCH 297/533] 
release: 4.68.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index eafafb2cf..b32797c27 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.3" + ".": "4.68.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 604e5183c..130b287c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.68.4 (2024-10-23) + +Full Changelog: [v4.68.3...v4.68.4](https://github.com/openai/openai-node/compare/v4.68.3...v4.68.4) + +### Chores + +* **internal:** update spec version ([#1146](https://github.com/openai/openai-node/issues/1146)) ([0165a8d](https://github.com/openai/openai-node/commit/0165a8d79340ede49557e05fd00d6fff9d69d930)) + ## 4.68.3 (2024-10-23) Full Changelog: [v4.68.2...v4.68.3](https://github.com/openai/openai-node/compare/v4.68.2...v4.68.3) diff --git a/README.md b/README.md index 16d0450bb..3bf20a026 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.3/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.68.4/mod.ts'; ``` diff --git a/package.json b/package.json index 4fc07b525..ce87796d1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.3", + "version": "4.68.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index fa0fd26ea..6a67bcdde 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from 
"https://deno.land/x/openai@v4.68.3/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.68.4/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 2657a62ac..5c2c17eaf 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.68.3'; // x-release-please-version +export const VERSION = '4.68.4'; // x-release-please-version From d4966566cb2d804b9892986fe4871eb051a416f0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:46:33 +0000 Subject: [PATCH 298/533] docs(readme): minor typo fixes (#1154) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3bf20a026..9aabd058e 100644 --- a/README.md +++ b/README.md @@ -444,7 +444,7 @@ Note that requests which time out will be [retried twice by default](#retries). ## Auto-pagination List methods in the OpenAI API are paginated. 
-You can use `for await … of` syntax to iterate through items across all pages: +You can use the `for await … of` syntax to iterate through items across all pages: ```ts async function fetchAllFineTuningJobs(params) { @@ -457,7 +457,7 @@ async function fetchAllFineTuningJobs(params) { } ``` -Alternatively, you can make request a single page at a time: +Alternatively, you can request a single page at a time: ```ts let page = await client.fineTuning.jobs.list({ limit: 20 }); From 8cafc09f3f4795d9a904f63b067d4f05292dab74 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 20:33:10 +0000 Subject: [PATCH 299/533] fix(internal): support pnpm git installs (#1156) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index ce87796d1..9f9b3ee86 100644 --- a/package.json +++ b/package.json @@ -10,7 +10,7 @@ "license": "Apache-2.0", "packageManager": "yarn@1.22.22", "files": [ - "*" + "**/*" ], "private": false, "scripts": { From b8e5d396b1524aaeef31c408d79fb30314e50577 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:11:29 +0000 Subject: [PATCH 300/533] feat(api): add new, expressive voices for Realtime and Audio in Chat Completions (#1157) https://platform.openai.com/docs/changelog --- .stats.yml | 2 +- src/resources/chat/completions.ts | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index 0b0872556..39413df44 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b60d5559d5150ecd3b49136064e5e251d832899770ff385b711378389afba370.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7b0a5d715d94f75ac7795bd4d2175a0e3243af9b935a86c273f371e45583140f.yml diff --git 
a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 97174ec1b..d439e9a25 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -250,10 +250,10 @@ export interface ChatCompletionAudioParam { format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** - * Specifies the voice type. Supported voices are `alloy`, `echo`, `fable`, `onyx`, - * `nova`, and `shimmer`. + * The voice the model uses to respond. Supported voices are `alloy`, `ash`, + * `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. */ - voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; + voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } /** @@ -308,7 +308,7 @@ export interface ChatCompletionChunk { * contains a null value except for the last chunk which contains the token usage * statistics for the entire request. */ - usage?: CompletionsAPI.CompletionUsage; + usage?: CompletionsAPI.CompletionUsage | null; } export namespace ChatCompletionChunk { From 622c80aaa17c486cbd16cf620b3bee6b73650ba4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:11:59 +0000 Subject: [PATCH 301/533] release: 4.69.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ README.md | 2 +- package.json | 2 +- scripts/build-deno | 2 +- src/version.ts | 2 +- 6 files changed, 23 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b32797c27..65aac9575 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.68.4" + ".": "4.69.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 130b287c2..b3b52aaa3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 4.69.0 (2024-10-30) + +Full Changelog: [v4.68.4...v4.69.0](https://github.com/openai/openai-node/compare/v4.68.4...v4.69.0) + +### Features 
+ +* **api:** add new, expressive voices for Realtime and Audio in Chat Completions ([#1157](https://github.com/openai/openai-node/issues/1157)) ([12e501c](https://github.com/openai/openai-node/commit/12e501c8a215a2af29b9b8fceedc5935b6f2feef)) + + +### Bug Fixes + +* **internal:** support pnpm git installs ([#1156](https://github.com/openai/openai-node/issues/1156)) ([b744c5b](https://github.com/openai/openai-node/commit/b744c5b609533e9a6694d6cae0425fe9cd37e26c)) + + +### Documentation + +* **readme:** minor typo fixes ([#1154](https://github.com/openai/openai-node/issues/1154)) ([c6c9f9a](https://github.com/openai/openai-node/commit/c6c9f9aaf75f643016ad73574a7e24a228b5c60f)) + ## 4.68.4 (2024-10-23) Full Changelog: [v4.68.3...v4.68.4](https://github.com/openai/openai-node/compare/v4.68.3...v4.68.4) diff --git a/README.md b/README.md index 9aabd058e..776ea4049 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ You can import in Deno via: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.68.4/mod.ts'; +import OpenAI from 'https://deno.land/x/openai@v4.69.0/mod.ts'; ``` diff --git a/package.json b/package.json index 9f9b3ee86..9e32feabb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.68.4", + "version": "4.69.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/scripts/build-deno b/scripts/build-deno index 6a67bcdde..be17942df 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -16,7 +16,7 @@ This is a build produced from https://github.com/openai/openai-node – please g Usage: \`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.68.4/mod.ts"; +import OpenAI from "https://deno.land/x/openai@v4.69.0/mod.ts"; const client = new OpenAI(); \`\`\` diff --git a/src/version.ts b/src/version.ts index 5c2c17eaf..be250f2d6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ 
-export const VERSION = '4.68.4'; // x-release-please-version +export const VERSION = '4.69.0'; // x-release-please-version From 6421d69314e89cde4a85d2a70f1dae4cf570d0f7 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 1 Nov 2024 04:32:16 +0000 Subject: [PATCH 302/533] refactor: use type imports for type-only imports (#1159) --- src/index.ts | 377 ++++++++++++------ src/lib/AssistantStream.ts | 3 +- src/resources/audio/audio.ts | 70 ++-- src/resources/audio/index.ts | 24 +- src/resources/audio/speech.ts | 6 +- src/resources/audio/transcriptions.ts | 21 +- src/resources/audio/translations.ts | 17 +- src/resources/batches.ts | 18 +- src/resources/beta/assistants.ts | 37 +- src/resources/beta/beta.ts | 158 +++++--- src/resources/beta/index.ts | 82 ++-- src/resources/beta/threads/index.ts | 126 +++--- src/resources/beta/threads/messages.ts | 71 ++-- src/resources/beta/threads/runs/index.ts | 72 ++-- src/resources/beta/threads/runs/runs.ts | 113 ++++-- src/resources/beta/threads/runs/steps.ts | 48 ++- src/resources/beta/threads/threads.ts | 198 ++++++--- .../beta/vector-stores/file-batches.ts | 11 +- src/resources/beta/vector-stores/files.ts | 17 +- src/resources/beta/vector-stores/index.ts | 48 +-- .../beta/vector-stores/vector-stores.ts | 76 ++-- src/resources/chat/chat.ts | 114 ++++-- src/resources/chat/completions.ts | 71 ++-- src/resources/chat/index.ts | 66 +-- src/resources/completions.ts | 16 +- src/resources/embeddings.ts | 13 +- src/resources/files.ts | 21 +- src/resources/fine-tuning/fine-tuning.ts | 43 +- src/resources/fine-tuning/index.ts | 16 +- src/resources/fine-tuning/jobs/checkpoints.ts | 13 +- src/resources/fine-tuning/jobs/index.ts | 28 +- src/resources/fine-tuning/jobs/jobs.ts | 47 ++- src/resources/images.ts | 17 +- src/resources/index.ts | 74 ++-- src/resources/models.ts | 9 +- src/resources/moderations.ts | 19 +- src/resources/uploads/index.ts | 4 +- src/resources/uploads/parts.ts | 6 +- src/resources/uploads/uploads.ts | 19 +- 
tsconfig.json | 2 +- 40 files changed, 1307 insertions(+), 884 deletions(-) diff --git a/src/index.ts b/src/index.ts index 56108223a..c1506997b 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,12 +1,108 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as Errors from './error'; -import * as Uploads from './uploads'; import { type Agent, type RequestInit } from './_shims/index'; import * as qs from './internal/qs'; import * as Core from './core'; +import * as Errors from './error'; import * as Pagination from './pagination'; +import { type CursorPageParams, CursorPageResponse, PageResponse } from './pagination'; +import * as Uploads from './uploads'; import * as API from './resources/index'; +import { + Batch, + BatchCreateParams, + BatchError, + BatchListParams, + BatchRequestCounts, + Batches, + BatchesPage, +} from './resources/batches'; +import { + Completion, + CompletionChoice, + CompletionCreateParams, + CompletionCreateParamsNonStreaming, + CompletionCreateParamsStreaming, + CompletionUsage, + Completions, +} from './resources/completions'; +import { + CreateEmbeddingResponse, + Embedding, + EmbeddingCreateParams, + EmbeddingModel, + Embeddings, +} from './resources/embeddings'; +import { + FileContent, + FileCreateParams, + FileDeleted, + FileListParams, + FileObject, + FileObjectsPage, + FilePurpose, + Files, +} from './resources/files'; +import { + Image, + ImageCreateVariationParams, + ImageEditParams, + ImageGenerateParams, + ImageModel, + Images, + ImagesResponse, +} from './resources/images'; +import { Model, ModelDeleted, Models, ModelsPage } from './resources/models'; +import { + Moderation, + ModerationCreateParams, + ModerationCreateResponse, + ModerationImageURLInput, + ModerationModel, + ModerationMultiModalInput, + ModerationTextInput, + Moderations, +} from './resources/moderations'; +import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; +import { Beta } from 
'./resources/beta/beta'; +import { Chat, ChatModel } from './resources/chat/chat'; +import { + ChatCompletion, + ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, + ChatCompletionChunk, + ChatCompletionContentPart, + ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, + ChatCompletionContentPartRefusal, + ChatCompletionContentPartText, + ChatCompletionCreateParams, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, + ChatCompletionFunctionCallOption, + ChatCompletionFunctionMessageParam, + ChatCompletionMessage, + ChatCompletionMessageParam, + ChatCompletionMessageToolCall, + ChatCompletionModality, + ChatCompletionNamedToolChoice, + ChatCompletionRole, + ChatCompletionStreamOptions, + ChatCompletionSystemMessageParam, + ChatCompletionTokenLogprob, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + ChatCompletionToolMessageParam, + ChatCompletionUserMessageParam, +} from './resources/chat/completions'; +import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { + Upload, + UploadCompleteParams, + UploadCreateParams, + Uploads as UploadsAPIUploads, +} from './resources/uploads/uploads'; export interface ClientOptions { /** @@ -209,138 +305,167 @@ export class OpenAI extends Core.APIClient { static fileFromPath = Uploads.fileFromPath; } -export const { - OpenAIError, - APIError, - APIConnectionError, - APIConnectionTimeoutError, - APIUserAbortError, - NotFoundError, - ConflictError, - RateLimitError, - BadRequestError, - AuthenticationError, - InternalServerError, - PermissionDeniedError, - UnprocessableEntityError, -} = Errors; +export const OpenAIError = Errors.OpenAIError; +export const APIError = Errors.APIError; +export const APIConnectionError = Errors.APIConnectionError; +export const APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; +export const APIUserAbortError = Errors.APIUserAbortError; +export const NotFoundError = 
Errors.NotFoundError; +export const ConflictError = Errors.ConflictError; +export const RateLimitError = Errors.RateLimitError; +export const BadRequestError = Errors.BadRequestError; +export const AuthenticationError = Errors.AuthenticationError; +export const InternalServerError = Errors.InternalServerError; +export const PermissionDeniedError = Errors.PermissionDeniedError; +export const UnprocessableEntityError = Errors.UnprocessableEntityError; export import toFile = Uploads.toFile; export import fileFromPath = Uploads.fileFromPath; -export namespace OpenAI { - export import RequestOptions = Core.RequestOptions; +OpenAI.Completions = Completions; +OpenAI.Chat = Chat; +OpenAI.Embeddings = Embeddings; +OpenAI.Files = Files; +OpenAI.FileObjectsPage = FileObjectsPage; +OpenAI.Images = Images; +OpenAI.Audio = Audio; +OpenAI.Moderations = Moderations; +OpenAI.Models = Models; +OpenAI.ModelsPage = ModelsPage; +OpenAI.FineTuning = FineTuning; +OpenAI.Beta = Beta; +OpenAI.Batches = Batches; +OpenAI.BatchesPage = BatchesPage; +OpenAI.Uploads = UploadsAPIUploads; + +export declare namespace OpenAI { + export type RequestOptions = Core.RequestOptions; export import Page = Pagination.Page; - export import PageResponse = Pagination.PageResponse; + export { type PageResponse as PageResponse }; export import CursorPage = Pagination.CursorPage; - export import CursorPageParams = Pagination.CursorPageParams; - export import CursorPageResponse = Pagination.CursorPageResponse; - - export import Completions = API.Completions; - export import Completion = API.Completion; - export import CompletionChoice = API.CompletionChoice; - export import CompletionUsage = API.CompletionUsage; - export import CompletionCreateParams = API.CompletionCreateParams; - export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; - export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; - - export import Chat = API.Chat; - export import 
ChatModel = API.ChatModel; - export import ChatCompletion = API.ChatCompletion; - export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; - export import ChatCompletionAudio = API.ChatCompletionAudio; - export import ChatCompletionAudioParam = API.ChatCompletionAudioParam; - export import ChatCompletionChunk = API.ChatCompletionChunk; - export import ChatCompletionContentPart = API.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; - export import ChatCompletionContentPartInputAudio = API.ChatCompletionContentPartInputAudio; - export import ChatCompletionContentPartRefusal = API.ChatCompletionContentPartRefusal; - export import ChatCompletionContentPartText = API.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = API.ChatCompletionMessage; - export import ChatCompletionMessageParam = API.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; - export import ChatCompletionModality = API.ChatCompletionModality; - export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = API.ChatCompletionRole; - export import ChatCompletionStreamOptions = API.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = API.ChatCompletionTokenLogprob; - export import ChatCompletionTool = API.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = API.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = API.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = API.ChatCompletionUserMessageParam; - export import 
ChatCompletionCreateParams = API.ChatCompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; - - export import Embeddings = API.Embeddings; - export import CreateEmbeddingResponse = API.CreateEmbeddingResponse; - export import Embedding = API.Embedding; - export import EmbeddingModel = API.EmbeddingModel; - export import EmbeddingCreateParams = API.EmbeddingCreateParams; - - export import Files = API.Files; - export import FileContent = API.FileContent; - export import FileDeleted = API.FileDeleted; - export import FileObject = API.FileObject; - export import FilePurpose = API.FilePurpose; - export import FileObjectsPage = API.FileObjectsPage; - export import FileCreateParams = API.FileCreateParams; - export import FileListParams = API.FileListParams; - - export import Images = API.Images; - export import Image = API.Image; - export import ImageModel = API.ImageModel; - export import ImagesResponse = API.ImagesResponse; - export import ImageCreateVariationParams = API.ImageCreateVariationParams; - export import ImageEditParams = API.ImageEditParams; - export import ImageGenerateParams = API.ImageGenerateParams; - - export import Audio = API.Audio; - export import AudioModel = API.AudioModel; - export import AudioResponseFormat = API.AudioResponseFormat; - - export import Moderations = API.Moderations; - export import Moderation = API.Moderation; - export import ModerationImageURLInput = API.ModerationImageURLInput; - export import ModerationModel = API.ModerationModel; - export import ModerationMultiModalInput = API.ModerationMultiModalInput; - export import ModerationTextInput = API.ModerationTextInput; - export import ModerationCreateResponse = API.ModerationCreateResponse; - export import ModerationCreateParams = API.ModerationCreateParams; - - export import Models = API.Models; - export import Model 
= API.Model; - export import ModelDeleted = API.ModelDeleted; - export import ModelsPage = API.ModelsPage; - - export import FineTuning = API.FineTuning; - - export import Beta = API.Beta; - - export import Batches = API.Batches; - export import Batch = API.Batch; - export import BatchError = API.BatchError; - export import BatchRequestCounts = API.BatchRequestCounts; - export import BatchesPage = API.BatchesPage; - export import BatchCreateParams = API.BatchCreateParams; - export import BatchListParams = API.BatchListParams; - - export import Uploads = API.Uploads; - export import Upload = API.Upload; - export import UploadCreateParams = API.UploadCreateParams; - export import UploadCompleteParams = API.UploadCompleteParams; - - export import ErrorObject = API.ErrorObject; - export import FunctionDefinition = API.FunctionDefinition; - export import FunctionParameters = API.FunctionParameters; - export import ResponseFormatJSONObject = API.ResponseFormatJSONObject; - export import ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; - export import ResponseFormatText = API.ResponseFormatText; + export { type CursorPageParams as CursorPageParams, type CursorPageResponse as CursorPageResponse }; + + export { + Completions as Completions, + type Completion as Completion, + type CompletionChoice as CompletionChoice, + type CompletionUsage as CompletionUsage, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; + + export { + Chat as Chat, + type ChatModel as ChatModel, + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as 
ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + }; + + export { + Embeddings as Embeddings, + type CreateEmbeddingResponse as CreateEmbeddingResponse, + type Embedding as Embedding, + type EmbeddingModel as EmbeddingModel, + type EmbeddingCreateParams as EmbeddingCreateParams, + }; + + export { + Files as Files, + type FileContent as FileContent, + type FileDeleted as FileDeleted, + type FileObject as FileObject, + type FilePurpose as FilePurpose, + FileObjectsPage as 
FileObjectsPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; + + export { + Images as Images, + type Image as Image, + type ImageModel as ImageModel, + type ImagesResponse as ImagesResponse, + type ImageCreateVariationParams as ImageCreateVariationParams, + type ImageEditParams as ImageEditParams, + type ImageGenerateParams as ImageGenerateParams, + }; + + export { Audio as Audio, type AudioModel as AudioModel, type AudioResponseFormat as AudioResponseFormat }; + + export { + Moderations as Moderations, + type Moderation as Moderation, + type ModerationImageURLInput as ModerationImageURLInput, + type ModerationModel as ModerationModel, + type ModerationMultiModalInput as ModerationMultiModalInput, + type ModerationTextInput as ModerationTextInput, + type ModerationCreateResponse as ModerationCreateResponse, + type ModerationCreateParams as ModerationCreateParams, + }; + + export { + Models as Models, + type Model as Model, + type ModelDeleted as ModelDeleted, + ModelsPage as ModelsPage, + }; + + export { FineTuning as FineTuning }; + + export { Beta as Beta }; + + export { + Batches as Batches, + type Batch as Batch, + type BatchError as BatchError, + type BatchRequestCounts as BatchRequestCounts, + BatchesPage as BatchesPage, + type BatchCreateParams as BatchCreateParams, + type BatchListParams as BatchListParams, + }; + + export { + UploadsAPIUploads as Uploads, + type Upload as Upload, + type UploadCreateParams as UploadCreateParams, + type UploadCompleteParams as UploadCompleteParams, + }; + + export type ErrorObject = API.ErrorObject; + export type FunctionDefinition = API.FunctionDefinition; + export type FunctionParameters = API.FunctionParameters; + export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; + export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; + export type ResponseFormatText = API.ResponseFormatText; } // ---------------------- Azure ---------------------- diff --git 
a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index 7c5ffb58e..c826c910e 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -6,7 +6,7 @@ import { Text, ImageFile, TextDelta, - Messages, + MessageDelta, MessageContent, } from 'openai/resources/beta/threads/messages'; import * as Core from 'openai/core'; @@ -31,7 +31,6 @@ import { import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps'; import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; import { BaseEvents, EventStream } from './EventStream'; -import MessageDelta = Messages.MessageDelta; export interface AssistantStreamEvents extends BaseEvents { run: (run: Run) => void; diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index e06e28094..b9a7ad4f8 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -1,10 +1,26 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; -import * as AudioAPI from './audio'; import * as SpeechAPI from './speech'; +import { Speech, SpeechCreateParams, SpeechModel } from './speech'; import * as TranscriptionsAPI from './transcriptions'; +import { + Transcription, + TranscriptionCreateParams, + TranscriptionCreateResponse, + TranscriptionSegment, + TranscriptionVerbose, + TranscriptionWord, + Transcriptions, +} from './transcriptions'; import * as TranslationsAPI from './translations'; +import { + Translation, + TranslationCreateParams, + TranslationCreateResponse, + TranslationVerbose, + Translations, +} from './translations'; export class Audio extends APIResource { transcriptions: TranscriptionsAPI.Transcriptions = new TranscriptionsAPI.Transcriptions(this._client); @@ -20,30 +36,30 @@ export type AudioModel = 'whisper-1'; */ export type AudioResponseFormat = 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; -export namespace Audio { - export import AudioModel = AudioAPI.AudioModel; - export import AudioResponseFormat = AudioAPI.AudioResponseFormat; - export import Transcriptions = TranscriptionsAPI.Transcriptions; - export import Transcription = TranscriptionsAPI.Transcription; - export import TranscriptionSegment = TranscriptionsAPI.TranscriptionSegment; - export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; - export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; - export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; - export type TranscriptionCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranscriptionsAPI.TranscriptionCreateParams; - export import Translations = TranslationsAPI.Translations; - export import Translation = TranslationsAPI.Translation; - export import TranslationVerbose = TranslationsAPI.TranslationVerbose; - export import TranslationCreateResponse = 
TranslationsAPI.TranslationCreateResponse; - export type TranslationCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranslationsAPI.TranslationCreateParams; - export import Speech = SpeechAPI.Speech; - export import SpeechModel = SpeechAPI.SpeechModel; - export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; +Audio.Transcriptions = Transcriptions; +Audio.Translations = Translations; +Audio.Speech = Speech; + +export declare namespace Audio { + export { type AudioModel as AudioModel, type AudioResponseFormat as AudioResponseFormat }; + + export { + Transcriptions as Transcriptions, + type Transcription as Transcription, + type TranscriptionSegment as TranscriptionSegment, + type TranscriptionVerbose as TranscriptionVerbose, + type TranscriptionWord as TranscriptionWord, + type TranscriptionCreateResponse as TranscriptionCreateResponse, + type TranscriptionCreateParams as TranscriptionCreateParams, + }; + + export { + Translations as Translations, + type Translation as Translation, + type TranslationVerbose as TranslationVerbose, + type TranslationCreateResponse as TranslationCreateResponse, + type TranslationCreateParams as TranslationCreateParams, + }; + + export { Speech as Speech, type SpeechModel as SpeechModel, type SpeechCreateParams as SpeechCreateParams }; } diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index 952c05b03..2bbe9e3ab 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -1,20 +1,20 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { AudioModel, AudioResponseFormat, Audio } from './audio'; -export { SpeechModel, SpeechCreateParams, Speech } from './speech'; +export { Audio, type AudioModel, type AudioResponseFormat } from './audio'; +export { Speech, type SpeechModel, type SpeechCreateParams } from './speech'; export { - Transcription, - TranscriptionSegment, - TranscriptionVerbose, - TranscriptionWord, - TranscriptionCreateResponse, - TranscriptionCreateParams, Transcriptions, + type Transcription, + type TranscriptionSegment, + type TranscriptionVerbose, + type TranscriptionWord, + type TranscriptionCreateResponse, + type TranscriptionCreateParams, } from './transcriptions'; export { - Translation, - TranslationVerbose, - TranslationCreateResponse, - TranslationCreateParams, Translations, + type Translation, + type TranslationVerbose, + type TranslationCreateResponse, + type TranslationCreateParams, } from './translations'; diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 34fb26b02..da99bf649 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as SpeechAPI from './speech'; import { type Response } from '../../_shims/index'; export class Speech extends APIResource { @@ -49,7 +48,6 @@ export interface SpeechCreateParams { speed?: number; } -export namespace Speech { - export import SpeechModel = SpeechAPI.SpeechModel; - export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; +export declare namespace Speech { + export { type SpeechModel as SpeechModel, type SpeechCreateParams as SpeechCreateParams }; } diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 902dc9e5f..dd4258787 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; 
-import * as TranscriptionsAPI from './transcriptions'; import * as AudioAPI from './audio'; export class Transcriptions extends APIResource { @@ -205,15 +204,13 @@ export interface TranscriptionCreateParams< timestamp_granularities?: Array<'word' | 'segment'>; } -export namespace Transcriptions { - export import Transcription = TranscriptionsAPI.Transcription; - export import TranscriptionSegment = TranscriptionsAPI.TranscriptionSegment; - export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; - export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; - export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; - export type TranscriptionCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranscriptionsAPI.TranscriptionCreateParams; +export declare namespace Transcriptions { + export { + type Transcription as Transcription, + type TranscriptionSegment as TranscriptionSegment, + type TranscriptionVerbose as TranscriptionVerbose, + type TranscriptionWord as TranscriptionWord, + type TranscriptionCreateResponse as TranscriptionCreateResponse, + type TranscriptionCreateParams as TranscriptionCreateParams, + }; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 36c2dc7c2..b98a95044 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as TranslationsAPI from './translations'; import * as AudioAPI from './audio'; import * as TranscriptionsAPI from './transcriptions'; @@ -98,13 +97,11 @@ export interface TranslationCreateParams< temperature?: number; } -export namespace Translations { - export import Translation = TranslationsAPI.Translation; - export import TranslationVerbose = TranslationsAPI.TranslationVerbose; - export import 
TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse; - export type TranslationCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranslationsAPI.TranslationCreateParams; +export declare namespace Translations { + export { + type Translation as Translation, + type TranslationVerbose as TranslationVerbose, + type TranslationCreateResponse as TranslationCreateResponse, + type TranslationCreateParams as TranslationCreateParams, + }; } diff --git a/src/resources/batches.ts b/src/resources/batches.ts index 738582f9e..e68e7569c 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -244,11 +244,15 @@ export interface BatchCreateParams { export interface BatchListParams extends CursorPageParams {} -export namespace Batches { - export import Batch = BatchesAPI.Batch; - export import BatchError = BatchesAPI.BatchError; - export import BatchRequestCounts = BatchesAPI.BatchRequestCounts; - export import BatchesPage = BatchesAPI.BatchesPage; - export import BatchCreateParams = BatchesAPI.BatchCreateParams; - export import BatchListParams = BatchesAPI.BatchListParams; +Batches.BatchesPage = BatchesPage; + +export declare namespace Batches { + export { + type Batch as Batch, + type BatchError as BatchError, + type BatchRequestCounts as BatchRequestCounts, + BatchesPage as BatchesPage, + type BatchCreateParams as BatchCreateParams, + type BatchListParams as BatchListParams, + }; } diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index aa7362297..6d48089ce 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -3,7 +3,6 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; -import * as AssistantsAPI from './assistants'; import * as Shared from '../shared'; import * as ChatAPI from '../chat/chat'; import * as MessagesAPI 
from './threads/messages'; @@ -1396,20 +1395,24 @@ export interface AssistantListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Assistants { - export import Assistant = AssistantsAPI.Assistant; - export import AssistantDeleted = AssistantsAPI.AssistantDeleted; - export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; - export import AssistantTool = AssistantsAPI.AssistantTool; - export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; - export import FileSearchTool = AssistantsAPI.FileSearchTool; - export import FunctionTool = AssistantsAPI.FunctionTool; - export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; - export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; - export import RunStreamEvent = AssistantsAPI.RunStreamEvent; - export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; - export import AssistantsPage = AssistantsAPI.AssistantsPage; - export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams; - export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; - export import AssistantListParams = AssistantsAPI.AssistantListParams; +Assistants.AssistantsPage = AssistantsPage; + +export declare namespace Assistants { + export { + type Assistant as Assistant, + type AssistantDeleted as AssistantDeleted, + type AssistantStreamEvent as AssistantStreamEvent, + type AssistantTool as AssistantTool, + type CodeInterpreterTool as CodeInterpreterTool, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type MessageStreamEvent as MessageStreamEvent, + type RunStepStreamEvent as RunStepStreamEvent, + type RunStreamEvent as RunStreamEvent, + type ThreadStreamEvent as ThreadStreamEvent, + AssistantsPage as AssistantsPage, + type AssistantCreateParams as AssistantCreateParams, + type AssistantUpdateParams as AssistantUpdateParams, + type AssistantListParams as AssistantListParams, + }; } diff --git 
a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 0bcf217a8..b904abe4a 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -3,8 +3,59 @@ import { APIResource } from '../../resource'; import * as AssistantsAPI from './assistants'; import * as ChatAPI from './chat/chat'; +import { + Assistant, + AssistantCreateParams, + AssistantDeleted, + AssistantListParams, + AssistantStreamEvent, + AssistantTool, + AssistantUpdateParams, + Assistants, + AssistantsPage, + CodeInterpreterTool, + FileSearchTool, + FunctionTool, + MessageStreamEvent, + RunStepStreamEvent, + RunStreamEvent, + ThreadStreamEvent, +} from './assistants'; import * as ThreadsAPI from './threads/threads'; +import { + AssistantResponseFormatOption, + AssistantToolChoice, + AssistantToolChoiceFunction, + AssistantToolChoiceOption, + Thread, + ThreadCreateAndRunParams, + ThreadCreateAndRunParamsNonStreaming, + ThreadCreateAndRunParamsStreaming, + ThreadCreateAndRunPollParams, + ThreadCreateAndRunStreamParams, + ThreadCreateParams, + ThreadDeleted, + ThreadUpdateParams, + Threads, +} from './threads/threads'; import * as VectorStoresAPI from './vector-stores/vector-stores'; +import { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyParam, + VectorStore, + VectorStoreCreateParams, + VectorStoreDeleted, + VectorStoreListParams, + VectorStoreUpdateParams, + VectorStores, + VectorStoresPage, +} from './vector-stores/vector-stores'; +import { Chat } from './chat/chat'; export class Beta extends APIResource { vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); @@ -13,50 +64,65 @@ export class Beta extends APIResource { threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } -export namespace Beta { - export import VectorStores = VectorStoresAPI.VectorStores; - 
export import AutoFileChunkingStrategyParam = VectorStoresAPI.AutoFileChunkingStrategyParam; - export import FileChunkingStrategy = VectorStoresAPI.FileChunkingStrategy; - export import FileChunkingStrategyParam = VectorStoresAPI.FileChunkingStrategyParam; - export import OtherFileChunkingStrategyObject = VectorStoresAPI.OtherFileChunkingStrategyObject; - export import StaticFileChunkingStrategy = VectorStoresAPI.StaticFileChunkingStrategy; - export import StaticFileChunkingStrategyObject = VectorStoresAPI.StaticFileChunkingStrategyObject; - export import StaticFileChunkingStrategyParam = VectorStoresAPI.StaticFileChunkingStrategyParam; - export import VectorStore = VectorStoresAPI.VectorStore; - export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; - export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; - export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams; - export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams; - export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams; - export import Chat = ChatAPI.Chat; - export import Assistants = AssistantsAPI.Assistants; - export import Assistant = AssistantsAPI.Assistant; - export import AssistantDeleted = AssistantsAPI.AssistantDeleted; - export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; - export import AssistantTool = AssistantsAPI.AssistantTool; - export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; - export import FileSearchTool = AssistantsAPI.FileSearchTool; - export import FunctionTool = AssistantsAPI.FunctionTool; - export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; - export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; - export import RunStreamEvent = AssistantsAPI.RunStreamEvent; - export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; - export import AssistantsPage = AssistantsAPI.AssistantsPage; - export import 
AssistantCreateParams = AssistantsAPI.AssistantCreateParams; - export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; - export import AssistantListParams = AssistantsAPI.AssistantListParams; - export import Threads = ThreadsAPI.Threads; - export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; - export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; - export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; - export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption; - export import Thread = ThreadsAPI.Thread; - export import ThreadDeleted = ThreadsAPI.ThreadDeleted; - export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; - export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams; - export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; - export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; - export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; - export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams; - export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; +Beta.VectorStores = VectorStores; +Beta.VectorStoresPage = VectorStoresPage; +Beta.Assistants = Assistants; +Beta.AssistantsPage = AssistantsPage; +Beta.Threads = Threads; + +export declare namespace Beta { + export { + VectorStores as VectorStores, + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyParam as 
StaticFileChunkingStrategyParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + VectorStoresPage as VectorStoresPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + }; + + export { Chat }; + + export { + Assistants as Assistants, + type Assistant as Assistant, + type AssistantDeleted as AssistantDeleted, + type AssistantStreamEvent as AssistantStreamEvent, + type AssistantTool as AssistantTool, + type CodeInterpreterTool as CodeInterpreterTool, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type MessageStreamEvent as MessageStreamEvent, + type RunStepStreamEvent as RunStepStreamEvent, + type RunStreamEvent as RunStreamEvent, + type ThreadStreamEvent as ThreadStreamEvent, + AssistantsPage as AssistantsPage, + type AssistantCreateParams as AssistantCreateParams, + type AssistantUpdateParams as AssistantUpdateParams, + type AssistantListParams as AssistantListParams, + }; + + export { + Threads as Threads, + type AssistantResponseFormatOption as AssistantResponseFormatOption, + type AssistantToolChoice as AssistantToolChoice, + type AssistantToolChoiceFunction as AssistantToolChoiceFunction, + type AssistantToolChoiceOption as AssistantToolChoiceOption, + type Thread as Thread, + type ThreadDeleted as ThreadDeleted, + type ThreadCreateParams as ThreadCreateParams, + type ThreadUpdateParams as ThreadUpdateParams, + type ThreadCreateAndRunParams as ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming as ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming as ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, + }; } diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 9fcf805a1..d7111288f 100644 --- a/src/resources/beta/index.ts +++ 
b/src/resources/beta/index.ts @@ -1,54 +1,54 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - Assistant, - AssistantDeleted, - AssistantStreamEvent, - AssistantTool, - CodeInterpreterTool, - FileSearchTool, - FunctionTool, - MessageStreamEvent, - RunStepStreamEvent, - RunStreamEvent, - ThreadStreamEvent, - AssistantCreateParams, - AssistantUpdateParams, - AssistantListParams, AssistantsPage, Assistants, + type Assistant, + type AssistantDeleted, + type AssistantStreamEvent, + type AssistantTool, + type CodeInterpreterTool, + type FileSearchTool, + type FunctionTool, + type MessageStreamEvent, + type RunStepStreamEvent, + type RunStreamEvent, + type ThreadStreamEvent, + type AssistantCreateParams, + type AssistantUpdateParams, + type AssistantListParams, } from './assistants'; +export { Beta } from './beta'; +export { Chat } from './chat/index'; export { - AssistantResponseFormatOption, - AssistantToolChoice, - AssistantToolChoiceFunction, - AssistantToolChoiceOption, - Thread, - ThreadDeleted, - ThreadCreateParams, - ThreadUpdateParams, - ThreadCreateAndRunParams, - ThreadCreateAndRunParamsNonStreaming, - ThreadCreateAndRunParamsStreaming, - ThreadCreateAndRunPollParams, - ThreadCreateAndRunStreamParams, Threads, + type AssistantResponseFormatOption, + type AssistantToolChoice, + type AssistantToolChoiceFunction, + type AssistantToolChoiceOption, + type Thread, + type ThreadDeleted, + type ThreadCreateParams, + type ThreadUpdateParams, + type ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, } from './threads/index'; -export { Beta } from './beta'; -export { Chat } from './chat/index'; export { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, 
- StaticFileChunkingStrategyParam, - VectorStore, - VectorStoreDeleted, - VectorStoreCreateParams, - VectorStoreUpdateParams, - VectorStoreListParams, VectorStoresPage, VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, } from './vector-stores/index'; diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 1964cffb8..f67a1edde 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -1,73 +1,73 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - Annotation, - AnnotationDelta, - FileCitationAnnotation, - FileCitationDeltaAnnotation, - FilePathAnnotation, - FilePathDeltaAnnotation, - ImageFile, - ImageFileContentBlock, - ImageFileDelta, - ImageFileDeltaBlock, - ImageURL, - ImageURLContentBlock, - ImageURLDelta, - ImageURLDeltaBlock, - Message, - MessageContent, - MessageContentDelta, - MessageContentPartParam, - MessageDeleted, - MessageDelta, - MessageDeltaEvent, - RefusalContentBlock, - RefusalDeltaBlock, - Text, - TextContentBlock, - TextContentBlockParam, - TextDelta, - TextDeltaBlock, - MessageCreateParams, - MessageUpdateParams, - MessageListParams, MessagesPage, Messages, + type Annotation, + type AnnotationDelta, + type FileCitationAnnotation, + type FileCitationDeltaAnnotation, + type FilePathAnnotation, + type FilePathDeltaAnnotation, + type ImageFile, + type ImageFileContentBlock, + type ImageFileDelta, + type ImageFileDeltaBlock, + type ImageURL, + type ImageURLContentBlock, + type ImageURLDelta, + type ImageURLDeltaBlock, + type Message, + type MessageContent, + type 
MessageContentDelta, + type MessageContentPartParam, + type MessageDeleted, + type MessageDelta, + type MessageDeltaEvent, + type RefusalContentBlock, + type RefusalDeltaBlock, + type Text, + type TextContentBlock, + type TextContentBlockParam, + type TextDelta, + type TextDeltaBlock, + type MessageCreateParams, + type MessageUpdateParams, + type MessageListParams, } from './messages'; export { - AssistantResponseFormatOption, - AssistantToolChoice, - AssistantToolChoiceFunction, - AssistantToolChoiceOption, - Thread, - ThreadDeleted, - ThreadCreateParams, - ThreadUpdateParams, - ThreadCreateAndRunParams, - ThreadCreateAndRunParamsNonStreaming, - ThreadCreateAndRunParamsStreaming, - ThreadCreateAndRunPollParams, - ThreadCreateAndRunStreamParams, - Threads, -} from './threads'; -export { - RequiredActionFunctionToolCall, - Run, - RunStatus, - RunCreateParams, - RunCreateParamsNonStreaming, - RunCreateParamsStreaming, - RunUpdateParams, - RunListParams, - RunCreateAndPollParams, - RunCreateAndStreamParams, - RunStreamParams, - RunSubmitToolOutputsParams, - RunSubmitToolOutputsParamsNonStreaming, - RunSubmitToolOutputsParamsStreaming, - RunSubmitToolOutputsAndPollParams, - RunSubmitToolOutputsStreamParams, RunsPage, Runs, + type RequiredActionFunctionToolCall, + type Run, + type RunStatus, + type RunCreateParams, + type RunCreateParamsNonStreaming, + type RunCreateParamsStreaming, + type RunUpdateParams, + type RunListParams, + type RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, } from './runs/index'; +export { + Threads, + type AssistantResponseFormatOption, + type AssistantToolChoice, + type AssistantToolChoiceFunction, + type AssistantToolChoiceOption, + type Thread, + type ThreadDeleted, + type ThreadCreateParams, + type 
ThreadUpdateParams, + type ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, +} from './threads'; diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 59c92675b..af7977667 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -3,7 +3,6 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as MessagesAPI from './messages'; import * as AssistantsAPI from '../assistants'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -722,37 +721,41 @@ export interface MessageListParams extends CursorPageParams { run_id?: string; } -export namespace Messages { - export import Annotation = MessagesAPI.Annotation; - export import AnnotationDelta = MessagesAPI.AnnotationDelta; - export import FileCitationAnnotation = MessagesAPI.FileCitationAnnotation; - export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation; - export import FilePathAnnotation = MessagesAPI.FilePathAnnotation; - export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation; - export import ImageFile = MessagesAPI.ImageFile; - export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; - export import ImageFileDelta = MessagesAPI.ImageFileDelta; - export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; - export import ImageURL = MessagesAPI.ImageURL; - export import ImageURLContentBlock = MessagesAPI.ImageURLContentBlock; - export import ImageURLDelta = MessagesAPI.ImageURLDelta; - export import ImageURLDeltaBlock = MessagesAPI.ImageURLDeltaBlock; - export import Message = MessagesAPI.Message; - export import MessageContent = MessagesAPI.MessageContent; - export import MessageContentDelta = 
MessagesAPI.MessageContentDelta; - export import MessageContentPartParam = MessagesAPI.MessageContentPartParam; - export import MessageDeleted = MessagesAPI.MessageDeleted; - export import MessageDelta = MessagesAPI.MessageDelta; - export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; - export import RefusalContentBlock = MessagesAPI.RefusalContentBlock; - export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock; - export import Text = MessagesAPI.Text; - export import TextContentBlock = MessagesAPI.TextContentBlock; - export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; - export import TextDelta = MessagesAPI.TextDelta; - export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; - export import MessagesPage = MessagesAPI.MessagesPage; - export import MessageCreateParams = MessagesAPI.MessageCreateParams; - export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; - export import MessageListParams = MessagesAPI.MessageListParams; +Messages.MessagesPage = MessagesPage; + +export declare namespace Messages { + export { + type Annotation as Annotation, + type AnnotationDelta as AnnotationDelta, + type FileCitationAnnotation as FileCitationAnnotation, + type FileCitationDeltaAnnotation as FileCitationDeltaAnnotation, + type FilePathAnnotation as FilePathAnnotation, + type FilePathDeltaAnnotation as FilePathDeltaAnnotation, + type ImageFile as ImageFile, + type ImageFileContentBlock as ImageFileContentBlock, + type ImageFileDelta as ImageFileDelta, + type ImageFileDeltaBlock as ImageFileDeltaBlock, + type ImageURL as ImageURL, + type ImageURLContentBlock as ImageURLContentBlock, + type ImageURLDelta as ImageURLDelta, + type ImageURLDeltaBlock as ImageURLDeltaBlock, + type Message as Message, + type MessageContent as MessageContent, + type MessageContentDelta as MessageContentDelta, + type MessageContentPartParam as MessageContentPartParam, + type MessageDeleted as MessageDeleted, + type MessageDelta as MessageDelta, + 
type MessageDeltaEvent as MessageDeltaEvent, + type RefusalContentBlock as RefusalContentBlock, + type RefusalDeltaBlock as RefusalDeltaBlock, + type Text as Text, + type TextContentBlock as TextContentBlock, + type TextContentBlockParam as TextContentBlockParam, + type TextDelta as TextDelta, + type TextDeltaBlock as TextDeltaBlock, + MessagesPage as MessagesPage, + type MessageCreateParams as MessageCreateParams, + type MessageUpdateParams as MessageUpdateParams, + type MessageListParams as MessageListParams, + }; } diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index 9496f59e1..9dbe575bc 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -1,46 +1,46 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - CodeInterpreterLogs, - CodeInterpreterOutputImage, - CodeInterpreterToolCall, - CodeInterpreterToolCallDelta, - FileSearchToolCall, - FileSearchToolCallDelta, - FunctionToolCall, - FunctionToolCallDelta, - MessageCreationStepDetails, - RunStep, - RunStepDelta, - RunStepDeltaEvent, - RunStepDeltaMessageDelta, - RunStepInclude, - ToolCall, - ToolCallDelta, - ToolCallDeltaObject, - ToolCallsStepDetails, - StepRetrieveParams, - StepListParams, RunStepsPage, Steps, + type CodeInterpreterLogs, + type CodeInterpreterOutputImage, + type CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta, + type FileSearchToolCall, + type FileSearchToolCallDelta, + type FunctionToolCall, + type FunctionToolCallDelta, + type MessageCreationStepDetails, + type RunStep, + type RunStepDelta, + type RunStepDeltaEvent, + type RunStepDeltaMessageDelta, + type RunStepInclude, + type ToolCall, + type ToolCallDelta, + type ToolCallDeltaObject, + type ToolCallsStepDetails, + type StepRetrieveParams, + type StepListParams, } from './steps'; export { - RequiredActionFunctionToolCall, - Run, - RunStatus, - RunCreateParams, - 
RunCreateParamsNonStreaming, - RunCreateParamsStreaming, - RunUpdateParams, - RunListParams, - RunCreateAndPollParams, - RunCreateAndStreamParams, - RunStreamParams, - RunSubmitToolOutputsParams, - RunSubmitToolOutputsParamsNonStreaming, - RunSubmitToolOutputsParamsStreaming, - RunSubmitToolOutputsAndPollParams, - RunSubmitToolOutputsStreamParams, RunsPage, Runs, + type RequiredActionFunctionToolCall, + type Run, + type RunStatus, + type RunCreateParams, + type RunCreateParamsNonStreaming, + type RunCreateParamsStreaming, + type RunUpdateParams, + type RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, } from './runs'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index b48edd5b1..83a447a91 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -13,6 +13,30 @@ import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; import * as ThreadsAPI from '../threads'; import * as StepsAPI from './steps'; +import { + CodeInterpreterLogs, + CodeInterpreterOutputImage, + CodeInterpreterToolCall, + CodeInterpreterToolCallDelta, + FileSearchToolCall, + FileSearchToolCallDelta, + FunctionToolCall, + FunctionToolCallDelta, + MessageCreationStepDetails, + RunStep, + RunStepDelta, + RunStepDeltaEvent, + RunStepDeltaMessageDelta, + RunStepInclude, + RunStepsPage, + StepListParams, + StepRetrieveParams, + Steps, + ToolCall, + ToolCallDelta, + ToolCallDeltaObject, + ToolCallsStepDetails, +} from './steps'; import { CursorPage, type CursorPageParams } from '../../../../pagination'; import { Stream } from '../../../../streaming'; @@ -1619,44 +1643,53 @@ export namespace RunSubmitToolOutputsStreamParams { } 
} -export namespace Runs { - export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; - export import Run = RunsAPI.Run; - export import RunStatus = RunsAPI.RunStatus; - export import RunsPage = RunsAPI.RunsPage; - export import RunCreateParams = RunsAPI.RunCreateParams; - export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; - export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; - export import RunUpdateParams = RunsAPI.RunUpdateParams; - export import RunListParams = RunsAPI.RunListParams; - export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams; - export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; - export import RunStreamParams = RunsAPI.RunStreamParams; - export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; - export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; - export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; - export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams; - export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; - export import Steps = StepsAPI.Steps; - export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; - export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; - export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; - export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; - export import FileSearchToolCall = StepsAPI.FileSearchToolCall; - export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta; - export import FunctionToolCall = StepsAPI.FunctionToolCall; - export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; - export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; - export import RunStep = 
StepsAPI.RunStep; - export import RunStepDelta = StepsAPI.RunStepDelta; - export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; - export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; - export import RunStepInclude = StepsAPI.RunStepInclude; - export import ToolCall = StepsAPI.ToolCall; - export import ToolCallDelta = StepsAPI.ToolCallDelta; - export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; - export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; - export import RunStepsPage = StepsAPI.RunStepsPage; - export import StepRetrieveParams = StepsAPI.StepRetrieveParams; - export import StepListParams = StepsAPI.StepListParams; +Runs.RunsPage = RunsPage; +Runs.Steps = Steps; +Runs.RunStepsPage = RunStepsPage; + +export declare namespace Runs { + export { + type RequiredActionFunctionToolCall as RequiredActionFunctionToolCall, + type Run as Run, + type RunStatus as RunStatus, + RunsPage as RunsPage, + type RunCreateParams as RunCreateParams, + type RunCreateParamsNonStreaming as RunCreateParamsNonStreaming, + type RunCreateParamsStreaming as RunCreateParamsStreaming, + type RunUpdateParams as RunUpdateParams, + type RunListParams as RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams as RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming as RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming as RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, + }; + + export { + Steps as Steps, + type CodeInterpreterLogs as CodeInterpreterLogs, + type CodeInterpreterOutputImage as CodeInterpreterOutputImage, + type CodeInterpreterToolCall as CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta, + type FileSearchToolCall as FileSearchToolCall, + type FileSearchToolCallDelta as 
FileSearchToolCallDelta, + type FunctionToolCall as FunctionToolCall, + type FunctionToolCallDelta as FunctionToolCallDelta, + type MessageCreationStepDetails as MessageCreationStepDetails, + type RunStep as RunStep, + type RunStepDelta as RunStepDelta, + type RunStepDeltaEvent as RunStepDeltaEvent, + type RunStepDeltaMessageDelta as RunStepDeltaMessageDelta, + type RunStepInclude as RunStepInclude, + type ToolCall as ToolCall, + type ToolCallDelta as ToolCallDelta, + type ToolCallDeltaObject as ToolCallDeltaObject, + type ToolCallsStepDetails as ToolCallsStepDetails, + RunStepsPage as RunStepsPage, + type StepRetrieveParams as StepRetrieveParams, + type StepListParams as StepListParams, + }; } diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index c076191a3..b10bcb868 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -738,26 +738,30 @@ export interface StepListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Steps { - export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; - export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; - export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; - export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; - export import FileSearchToolCall = StepsAPI.FileSearchToolCall; - export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta; - export import FunctionToolCall = StepsAPI.FunctionToolCall; - export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; - export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; - export import RunStep = StepsAPI.RunStep; - export import RunStepDelta = StepsAPI.RunStepDelta; - export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; - export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; - export import 
RunStepInclude = StepsAPI.RunStepInclude; - export import ToolCall = StepsAPI.ToolCall; - export import ToolCallDelta = StepsAPI.ToolCallDelta; - export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; - export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; - export import RunStepsPage = StepsAPI.RunStepsPage; - export import StepRetrieveParams = StepsAPI.StepRetrieveParams; - export import StepListParams = StepsAPI.StepListParams; +Steps.RunStepsPage = RunStepsPage; + +export declare namespace Steps { + export { + type CodeInterpreterLogs as CodeInterpreterLogs, + type CodeInterpreterOutputImage as CodeInterpreterOutputImage, + type CodeInterpreterToolCall as CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta, + type FileSearchToolCall as FileSearchToolCall, + type FileSearchToolCallDelta as FileSearchToolCallDelta, + type FunctionToolCall as FunctionToolCall, + type FunctionToolCallDelta as FunctionToolCallDelta, + type MessageCreationStepDetails as MessageCreationStepDetails, + type RunStep as RunStep, + type RunStepDelta as RunStepDelta, + type RunStepDeltaEvent as RunStepDeltaEvent, + type RunStepDeltaMessageDelta as RunStepDeltaMessageDelta, + type RunStepInclude as RunStepInclude, + type ToolCall as ToolCall, + type ToolCallDelta as ToolCallDelta, + type ToolCallDeltaObject as ToolCallDeltaObject, + type ToolCallsStepDetails as ToolCallsStepDetails, + RunStepsPage as RunStepsPage, + type StepRetrieveParams as StepRetrieveParams, + type StepListParams as StepListParams, + }; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index be959eb30..899645508 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -10,8 +10,63 @@ import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; +import { + 
Annotation, + AnnotationDelta, + FileCitationAnnotation, + FileCitationDeltaAnnotation, + FilePathAnnotation, + FilePathDeltaAnnotation, + ImageFile, + ImageFileContentBlock, + ImageFileDelta, + ImageFileDeltaBlock, + ImageURL, + ImageURLContentBlock, + ImageURLDelta, + ImageURLDeltaBlock, + Message as MessagesAPIMessage, + MessageContent, + MessageContentDelta, + MessageContentPartParam, + MessageCreateParams, + MessageDeleted, + MessageDelta, + MessageDeltaEvent, + MessageListParams, + MessageUpdateParams, + Messages, + MessagesPage, + RefusalContentBlock, + RefusalDeltaBlock, + Text, + TextContentBlock, + TextContentBlockParam, + TextDelta, + TextDeltaBlock, +} from './messages'; import * as VectorStoresAPI from '../vector-stores/vector-stores'; import * as RunsAPI from './runs/runs'; +import { + RequiredActionFunctionToolCall, + Run, + RunCreateAndPollParams, + RunCreateAndStreamParams, + RunCreateParams, + RunCreateParamsNonStreaming, + RunCreateParamsStreaming, + RunListParams, + RunStatus, + RunStreamParams, + RunSubmitToolOutputsAndPollParams, + RunSubmitToolOutputsParams, + RunSubmitToolOutputsParamsNonStreaming, + RunSubmitToolOutputsParamsStreaming, + RunSubmitToolOutputsStreamParams, + RunUpdateParams, + Runs, + RunsPage, +} from './runs/runs'; import { Stream } from '../../../streaming'; export class Threads extends APIResource { @@ -1489,69 +1544,82 @@ export namespace ThreadCreateAndRunStreamParams { } } -export namespace Threads { - export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; - export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; - export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; - export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption; - export import Thread = ThreadsAPI.Thread; - export import ThreadDeleted = ThreadsAPI.ThreadDeleted; - export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; - export import 
ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams; - export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; - export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; - export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; - export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams; - export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; - export import Runs = RunsAPI.Runs; - export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; - export import Run = RunsAPI.Run; - export import RunStatus = RunsAPI.RunStatus; - export import RunsPage = RunsAPI.RunsPage; - export import RunCreateParams = RunsAPI.RunCreateParams; - export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; - export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; - export import RunUpdateParams = RunsAPI.RunUpdateParams; - export import RunListParams = RunsAPI.RunListParams; - export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams; - export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; - export import RunStreamParams = RunsAPI.RunStreamParams; - export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; - export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; - export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; - export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams; - export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; - export import Messages = MessagesAPI.Messages; - export import Annotation = MessagesAPI.Annotation; - export import AnnotationDelta = MessagesAPI.AnnotationDelta; - export import FileCitationAnnotation = 
MessagesAPI.FileCitationAnnotation; - export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation; - export import FilePathAnnotation = MessagesAPI.FilePathAnnotation; - export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation; - export import ImageFile = MessagesAPI.ImageFile; - export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; - export import ImageFileDelta = MessagesAPI.ImageFileDelta; - export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; - export import ImageURL = MessagesAPI.ImageURL; - export import ImageURLContentBlock = MessagesAPI.ImageURLContentBlock; - export import ImageURLDelta = MessagesAPI.ImageURLDelta; - export import ImageURLDeltaBlock = MessagesAPI.ImageURLDeltaBlock; - export import Message = MessagesAPI.Message; - export import MessageContent = MessagesAPI.MessageContent; - export import MessageContentDelta = MessagesAPI.MessageContentDelta; - export import MessageContentPartParam = MessagesAPI.MessageContentPartParam; - export import MessageDeleted = MessagesAPI.MessageDeleted; - export import MessageDelta = MessagesAPI.MessageDelta; - export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; - export import RefusalContentBlock = MessagesAPI.RefusalContentBlock; - export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock; - export import Text = MessagesAPI.Text; - export import TextContentBlock = MessagesAPI.TextContentBlock; - export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; - export import TextDelta = MessagesAPI.TextDelta; - export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; - export import MessagesPage = MessagesAPI.MessagesPage; - export import MessageCreateParams = MessagesAPI.MessageCreateParams; - export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; - export import MessageListParams = MessagesAPI.MessageListParams; +Threads.Runs = Runs; +Threads.RunsPage = RunsPage; +Threads.Messages = 
Messages; +Threads.MessagesPage = MessagesPage; + +export declare namespace Threads { + export { + type AssistantResponseFormatOption as AssistantResponseFormatOption, + type AssistantToolChoice as AssistantToolChoice, + type AssistantToolChoiceFunction as AssistantToolChoiceFunction, + type AssistantToolChoiceOption as AssistantToolChoiceOption, + type Thread as Thread, + type ThreadDeleted as ThreadDeleted, + type ThreadCreateParams as ThreadCreateParams, + type ThreadUpdateParams as ThreadUpdateParams, + type ThreadCreateAndRunParams as ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming as ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming as ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, + }; + + export { + Runs as Runs, + type RequiredActionFunctionToolCall as RequiredActionFunctionToolCall, + type Run as Run, + type RunStatus as RunStatus, + RunsPage as RunsPage, + type RunCreateParams as RunCreateParams, + type RunCreateParamsNonStreaming as RunCreateParamsNonStreaming, + type RunCreateParamsStreaming as RunCreateParamsStreaming, + type RunUpdateParams as RunUpdateParams, + type RunListParams as RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams as RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming as RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming as RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, + }; + + export { + Messages as Messages, + type Annotation as Annotation, + type AnnotationDelta as AnnotationDelta, + type FileCitationAnnotation as FileCitationAnnotation, + type FileCitationDeltaAnnotation as FileCitationDeltaAnnotation, + type FilePathAnnotation as FilePathAnnotation, + type FilePathDeltaAnnotation as 
FilePathDeltaAnnotation, + type ImageFile as ImageFile, + type ImageFileContentBlock as ImageFileContentBlock, + type ImageFileDelta as ImageFileDelta, + type ImageFileDeltaBlock as ImageFileDeltaBlock, + type ImageURL as ImageURL, + type ImageURLContentBlock as ImageURLContentBlock, + type ImageURLDelta as ImageURLDelta, + type ImageURLDeltaBlock as ImageURLDeltaBlock, + type MessagesAPIMessage as Message, + type MessageContent as MessageContent, + type MessageContentDelta as MessageContentDelta, + type MessageContentPartParam as MessageContentPartParam, + type MessageDeleted as MessageDeleted, + type MessageDelta as MessageDelta, + type MessageDeltaEvent as MessageDeltaEvent, + type RefusalContentBlock as RefusalContentBlock, + type RefusalDeltaBlock as RefusalDeltaBlock, + type Text as Text, + type TextContentBlock as TextContentBlock, + type TextContentBlockParam as TextContentBlockParam, + type TextDelta as TextDelta, + type TextDeltaBlock as TextDeltaBlock, + MessagesPage as MessagesPage, + type MessageCreateParams as MessageCreateParams, + type MessageUpdateParams as MessageUpdateParams, + type MessageListParams as MessageListParams, + }; } diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 3436d7575..533e6ce03 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -6,7 +6,6 @@ import { sleep } from '../../../core'; import { Uploadable } from '../../../core'; import { allSettledWithThrow } from '../../../lib/Util'; import * as Core from '../../../core'; -import * as FileBatchesAPI from './file-batches'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; import * as VectorStoresAPI from './vector-stores'; @@ -294,10 +293,12 @@ export interface FileBatchListFilesParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace FileBatches { - export import VectorStoreFileBatch = 
FileBatchesAPI.VectorStoreFileBatch; - export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams; - export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams; +export declare namespace FileBatches { + export { + type VectorStoreFileBatch as VectorStoreFileBatch, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListFilesParams as FileBatchListFilesParams, + }; } export { VectorStoreFilesPage }; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index f82cd63df..a263a0491 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -3,7 +3,6 @@ import { APIResource } from '../../../resource'; import { sleep, Uploadable, isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as FilesAPI from './files'; import * as VectorStoresAPI from './vector-stores'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -286,10 +285,14 @@ export interface FileListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Files { - export import VectorStoreFile = FilesAPI.VectorStoreFile; - export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted; - export import VectorStoreFilesPage = FilesAPI.VectorStoreFilesPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; +Files.VectorStoreFilesPage = VectorStoreFilesPage; + +export declare namespace Files { + export { + type VectorStoreFile as VectorStoreFile, + type VectorStoreFileDeleted as VectorStoreFileDeleted, + VectorStoreFilesPage as VectorStoreFilesPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; } diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/beta/vector-stores/index.ts index f70215f8f..89fc0cde0 100644 --- 
a/src/resources/beta/vector-stores/index.ts +++ b/src/resources/beta/vector-stores/index.ts @@ -1,32 +1,32 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyParam, - VectorStore, - VectorStoreDeleted, - VectorStoreCreateParams, - VectorStoreUpdateParams, - VectorStoreListParams, - VectorStoresPage, - VectorStores, -} from './vector-stores'; + FileBatches, + type VectorStoreFileBatch, + type FileBatchCreateParams, + type FileBatchListFilesParams, +} from './file-batches'; export { - VectorStoreFile, - VectorStoreFileDeleted, - FileCreateParams, - FileListParams, VectorStoreFilesPage, Files, + type VectorStoreFile, + type VectorStoreFileDeleted, + type FileCreateParams, + type FileListParams, } from './files'; export { - VectorStoreFileBatch, - FileBatchCreateParams, - FileBatchListFilesParams, - FileBatches, -} from './file-batches'; + VectorStoresPage, + VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, +} from './vector-stores'; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 3c9aa707d..4d1e83dce 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -3,9 +3,22 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as 
VectorStoresAPI from './vector-stores'; import * as FileBatchesAPI from './file-batches'; +import { + FileBatchCreateParams, + FileBatchListFilesParams, + FileBatches, + VectorStoreFileBatch, +} from './file-batches'; import * as FilesAPI from './files'; +import { + FileCreateParams, + FileListParams, + Files, + VectorStoreFile, + VectorStoreFileDeleted, + VectorStoreFilesPage, +} from './files'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class VectorStores extends APIResource { @@ -371,28 +384,41 @@ export interface VectorStoreListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace VectorStores { - export import AutoFileChunkingStrategyParam = VectorStoresAPI.AutoFileChunkingStrategyParam; - export import FileChunkingStrategy = VectorStoresAPI.FileChunkingStrategy; - export import FileChunkingStrategyParam = VectorStoresAPI.FileChunkingStrategyParam; - export import OtherFileChunkingStrategyObject = VectorStoresAPI.OtherFileChunkingStrategyObject; - export import StaticFileChunkingStrategy = VectorStoresAPI.StaticFileChunkingStrategy; - export import StaticFileChunkingStrategyObject = VectorStoresAPI.StaticFileChunkingStrategyObject; - export import StaticFileChunkingStrategyParam = VectorStoresAPI.StaticFileChunkingStrategyParam; - export import VectorStore = VectorStoresAPI.VectorStore; - export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; - export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; - export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams; - export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams; - export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams; - export import Files = FilesAPI.Files; - export import VectorStoreFile = FilesAPI.VectorStoreFile; - export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted; - export import VectorStoreFilesPage = 
FilesAPI.VectorStoreFilesPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; - export import FileBatches = FileBatchesAPI.FileBatches; - export import VectorStoreFileBatch = FileBatchesAPI.VectorStoreFileBatch; - export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams; - export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams; +VectorStores.VectorStoresPage = VectorStoresPage; +VectorStores.Files = Files; +VectorStores.VectorStoreFilesPage = VectorStoreFilesPage; +VectorStores.FileBatches = FileBatches; + +export declare namespace VectorStores { + export { + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + VectorStoresPage as VectorStoresPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + }; + + export { + Files as Files, + type VectorStoreFile as VectorStoreFile, + type VectorStoreFileDeleted as VectorStoreFileDeleted, + VectorStoreFilesPage as VectorStoreFilesPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; + + export { + FileBatches as FileBatches, + type VectorStoreFileBatch as VectorStoreFileBatch, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListFilesParams as FileBatchListFilesParams, + }; } diff --git a/src/resources/chat/chat.ts 
b/src/resources/chat/chat.ts index 43ef5662c..afe4dd08e 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,8 +1,42 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; -import * as ChatAPI from './chat'; import * as CompletionsAPI from './completions'; +import { + ChatCompletion, + ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, + ChatCompletionChunk, + ChatCompletionContentPart, + ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, + ChatCompletionContentPartRefusal, + ChatCompletionContentPartText, + ChatCompletionCreateParams, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, + ChatCompletionFunctionCallOption, + ChatCompletionFunctionMessageParam, + ChatCompletionMessage, + ChatCompletionMessageParam, + ChatCompletionMessageToolCall, + ChatCompletionModality, + ChatCompletionNamedToolChoice, + ChatCompletionRole, + ChatCompletionStreamOptions, + ChatCompletionSystemMessageParam, + ChatCompletionTokenLogprob, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + ChatCompletionToolMessageParam, + ChatCompletionUserMessageParam, + CompletionCreateParams, + CompletionCreateParamsNonStreaming, + CompletionCreateParamsStreaming, + Completions, + CreateChatCompletionRequestMessage, +} from './completions'; export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); @@ -43,42 +77,44 @@ export type ChatModel = | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613'; -export namespace Chat { - export import ChatModel = ChatAPI.ChatModel; - export import Completions = CompletionsAPI.Completions; - export import ChatCompletion = CompletionsAPI.ChatCompletion; - export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam; - export import ChatCompletionAudio = 
CompletionsAPI.ChatCompletionAudio; - export import ChatCompletionAudioParam = CompletionsAPI.ChatCompletionAudioParam; - export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk; - export import ChatCompletionContentPart = CompletionsAPI.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage; - export import ChatCompletionContentPartInputAudio = CompletionsAPI.ChatCompletionContentPartInputAudio; - export import ChatCompletionContentPartRefusal = CompletionsAPI.ChatCompletionContentPartRefusal; - export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = CompletionsAPI.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = CompletionsAPI.ChatCompletionMessage; - export import ChatCompletionMessageParam = CompletionsAPI.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = CompletionsAPI.ChatCompletionMessageToolCall; - export import ChatCompletionModality = CompletionsAPI.ChatCompletionModality; - export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole; - export import ChatCompletionStreamOptions = CompletionsAPI.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = CompletionsAPI.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = CompletionsAPI.ChatCompletionTokenLogprob; - export import ChatCompletionTool = CompletionsAPI.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = CompletionsAPI.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = CompletionsAPI.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = 
CompletionsAPI.ChatCompletionUserMessageParam; - /** - * @deprecated ChatCompletionMessageParam should be used instead - */ - export import CreateChatCompletionRequestMessage = CompletionsAPI.CreateChatCompletionRequestMessage; - export import ChatCompletionCreateParams = CompletionsAPI.ChatCompletionCreateParams; - export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = CompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export import CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = CompletionsAPI.ChatCompletionCreateParamsStreaming; - export import CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming; +Chat.Completions = Completions; + +export declare namespace Chat { + export { type ChatModel as ChatModel }; + + export { + Completions as Completions, + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type 
ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type CompletionCreateParams as CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; } diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index d439e9a25..430e52bb2 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1185,40 +1185,39 @@ export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreat */ export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreaming; -export namespace Completions { - export import ChatCompletion = ChatCompletionsAPI.ChatCompletion; - export import ChatCompletionAssistantMessageParam = ChatCompletionsAPI.ChatCompletionAssistantMessageParam; - export import ChatCompletionAudio = ChatCompletionsAPI.ChatCompletionAudio; - export import ChatCompletionAudioParam = ChatCompletionsAPI.ChatCompletionAudioParam; - export import ChatCompletionChunk = 
ChatCompletionsAPI.ChatCompletionChunk; - export import ChatCompletionContentPart = ChatCompletionsAPI.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage; - export import ChatCompletionContentPartInputAudio = ChatCompletionsAPI.ChatCompletionContentPartInputAudio; - export import ChatCompletionContentPartRefusal = ChatCompletionsAPI.ChatCompletionContentPartRefusal; - export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = ChatCompletionsAPI.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = ChatCompletionsAPI.ChatCompletionMessage; - export import ChatCompletionMessageParam = ChatCompletionsAPI.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = ChatCompletionsAPI.ChatCompletionMessageToolCall; - export import ChatCompletionModality = ChatCompletionsAPI.ChatCompletionModality; - export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole; - export import ChatCompletionStreamOptions = ChatCompletionsAPI.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = ChatCompletionsAPI.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = ChatCompletionsAPI.ChatCompletionTokenLogprob; - export import ChatCompletionTool = ChatCompletionsAPI.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = ChatCompletionsAPI.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = ChatCompletionsAPI.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = ChatCompletionsAPI.ChatCompletionUserMessageParam; - /** - * @deprecated ChatCompletionMessageParam 
should be used instead - */ - export import CreateChatCompletionRequestMessage = ChatCompletionsAPI.CreateChatCompletionRequestMessage; - export import ChatCompletionCreateParams = ChatCompletionsAPI.ChatCompletionCreateParams; - export import CompletionCreateParams = ChatCompletionsAPI.CompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export import CompletionCreateParamsNonStreaming = ChatCompletionsAPI.CompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming; - export import CompletionCreateParamsStreaming = ChatCompletionsAPI.CompletionCreateParamsStreaming; +export declare namespace Completions { + export { + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionRole as ChatCompletionRole, + type 
ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type CompletionCreateParams as CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; } diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 22803e819..d9366bf74 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,38 +1,38 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export { Chat, type ChatModel } from './chat'; export { - ChatCompletion, - ChatCompletionAssistantMessageParam, - ChatCompletionAudio, - ChatCompletionAudioParam, - ChatCompletionChunk, - ChatCompletionContentPart, - ChatCompletionContentPartImage, - ChatCompletionContentPartInputAudio, - ChatCompletionContentPartRefusal, - ChatCompletionContentPartText, - ChatCompletionFunctionCallOption, - ChatCompletionFunctionMessageParam, - ChatCompletionMessage, - ChatCompletionMessageParam, - ChatCompletionMessageToolCall, - ChatCompletionModality, - ChatCompletionNamedToolChoice, - ChatCompletionRole, - ChatCompletionStreamOptions, - ChatCompletionSystemMessageParam, - ChatCompletionTokenLogprob, - ChatCompletionTool, - ChatCompletionToolChoiceOption, - ChatCompletionToolMessageParam, - ChatCompletionUserMessageParam, - CreateChatCompletionRequestMessage, - ChatCompletionCreateParams, - CompletionCreateParams, - ChatCompletionCreateParamsNonStreaming, - CompletionCreateParamsNonStreaming, - ChatCompletionCreateParamsStreaming, - CompletionCreateParamsStreaming, Completions, + type ChatCompletion, + type ChatCompletionAssistantMessageParam, + type ChatCompletionAudio, + type ChatCompletionAudioParam, + type ChatCompletionChunk, + type ChatCompletionContentPart, + type ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText, + type ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam, + type ChatCompletionMessage, + type ChatCompletionMessageParam, + type ChatCompletionMessageToolCall, + type ChatCompletionModality, + type ChatCompletionNamedToolChoice, + type ChatCompletionRole, + type ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob, + type ChatCompletionTool, + type ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam, + type 
CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams, + type CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming, } from './completions'; -export { ChatModel, Chat } from './chat'; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 7acd5d13f..94c4581a1 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -361,11 +361,13 @@ export interface CompletionCreateParamsStreaming extends CompletionCreateParamsB stream: true; } -export namespace Completions { - export import Completion = CompletionsAPI.Completion; - export import CompletionChoice = CompletionsAPI.CompletionChoice; - export import CompletionUsage = CompletionsAPI.CompletionUsage; - export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams; - export import CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming; - export import CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming; +export declare namespace Completions { + export { + type Completion as Completion, + type CompletionChoice as CompletionChoice, + type CompletionUsage as CompletionUsage, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 6d8e670a7..e2b35f530 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as EmbeddingsAPI from './embeddings'; export class Embeddings extends APIResource { /** @@ -120,9 +119,11 @@ export interface EmbeddingCreateParams { user?: string; } -export namespace 
Embeddings { - export import CreateEmbeddingResponse = EmbeddingsAPI.CreateEmbeddingResponse; - export import Embedding = EmbeddingsAPI.Embedding; - export import EmbeddingModel = EmbeddingsAPI.EmbeddingModel; - export import EmbeddingCreateParams = EmbeddingsAPI.EmbeddingCreateParams; +export declare namespace Embeddings { + export { + type CreateEmbeddingResponse as CreateEmbeddingResponse, + type Embedding as Embedding, + type EmbeddingModel as EmbeddingModel, + type EmbeddingCreateParams as EmbeddingCreateParams, + }; } diff --git a/src/resources/files.ts b/src/resources/files.ts index ba01a9041..dec815a28 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -5,7 +5,6 @@ import { isRequestOptions } from '../core'; import { sleep } from '../core'; import { APIConnectionTimeoutError } from '../error'; import * as Core from '../core'; -import * as FilesAPI from './files'; import { Page } from '../pagination'; import { type Response } from '../_shims/index'; @@ -221,12 +220,16 @@ export interface FileListParams { purpose?: string; } -export namespace Files { - export import FileContent = FilesAPI.FileContent; - export import FileDeleted = FilesAPI.FileDeleted; - export import FileObject = FilesAPI.FileObject; - export import FilePurpose = FilesAPI.FilePurpose; - export import FileObjectsPage = FilesAPI.FileObjectsPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; +Files.FileObjectsPage = FileObjectsPage; + +export declare namespace Files { + export { + type FileContent as FileContent, + type FileDeleted as FileDeleted, + type FileObject as FileObject, + type FilePurpose as FilePurpose, + FileObjectsPage as FileObjectsPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; } diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index b1ba34ecf..df013c8ec 100644 --- 
a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -2,21 +2,40 @@ import { APIResource } from '../../resource'; import * as JobsAPI from './jobs/jobs'; +import { + FineTuningJob, + FineTuningJobEvent, + FineTuningJobEventsPage, + FineTuningJobIntegration, + FineTuningJobWandbIntegration, + FineTuningJobWandbIntegrationObject, + FineTuningJobsPage, + JobCreateParams, + JobListEventsParams, + JobListParams, + Jobs, +} from './jobs/jobs'; export class FineTuning extends APIResource { jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); } -export namespace FineTuning { - export import Jobs = JobsAPI.Jobs; - export import FineTuningJob = JobsAPI.FineTuningJob; - export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent; - export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration; - export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration; - export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject; - export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage; - export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage; - export import JobCreateParams = JobsAPI.JobCreateParams; - export import JobListParams = JobsAPI.JobListParams; - export import JobListEventsParams = JobsAPI.JobListEventsParams; +FineTuning.Jobs = Jobs; +FineTuning.FineTuningJobsPage = FineTuningJobsPage; +FineTuning.FineTuningJobEventsPage = FineTuningJobEventsPage; + +export declare namespace FineTuning { + export { + Jobs as Jobs, + type FineTuningJob as FineTuningJob, + type FineTuningJobEvent as FineTuningJobEvent, + type FineTuningJobIntegration as FineTuningJobIntegration, + type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, + FineTuningJobsPage as FineTuningJobsPage, + FineTuningJobEventsPage as FineTuningJobEventsPage, + type JobCreateParams as 
JobCreateParams, + type JobListParams as JobListParams, + type JobListEventsParams as JobListEventsParams, + }; } diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index 1d8739a0a..4954406b8 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -2,15 +2,15 @@ export { FineTuning } from './fine-tuning'; export { - FineTuningJob, - FineTuningJobEvent, - FineTuningJobIntegration, - FineTuningJobWandbIntegration, - FineTuningJobWandbIntegrationObject, - JobCreateParams, - JobListParams, - JobListEventsParams, FineTuningJobsPage, FineTuningJobEventsPage, Jobs, + type FineTuningJob, + type FineTuningJobEvent, + type FineTuningJobIntegration, + type FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject, + type JobCreateParams, + type JobListParams, + type JobListEventsParams, } from './jobs/index'; diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts index 02896b26d..b3018ac5f 100644 --- a/src/resources/fine-tuning/jobs/checkpoints.ts +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -3,7 +3,6 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as CheckpointsAPI from './checkpoints'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Checkpoints extends APIResource { @@ -101,8 +100,12 @@ export namespace FineTuningJobCheckpoint { export interface CheckpointListParams extends CursorPageParams {} -export namespace Checkpoints { - export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint; - export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage; - export import CheckpointListParams = CheckpointsAPI.CheckpointListParams; +Checkpoints.FineTuningJobCheckpointsPage = FineTuningJobCheckpointsPage; + +export declare namespace Checkpoints { + 
export { + type FineTuningJobCheckpoint as FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage as FineTuningJobCheckpointsPage, + type CheckpointListParams as CheckpointListParams, + }; } diff --git a/src/resources/fine-tuning/jobs/index.ts b/src/resources/fine-tuning/jobs/index.ts index 275c776e9..7a05b48b2 100644 --- a/src/resources/fine-tuning/jobs/index.ts +++ b/src/resources/fine-tuning/jobs/index.ts @@ -1,21 +1,21 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - FineTuningJob, - FineTuningJobEvent, - FineTuningJobIntegration, - FineTuningJobWandbIntegration, - FineTuningJobWandbIntegrationObject, - JobCreateParams, - JobListParams, - JobListEventsParams, + FineTuningJobCheckpointsPage, + Checkpoints, + type FineTuningJobCheckpoint, + type CheckpointListParams, +} from './checkpoints'; +export { FineTuningJobsPage, FineTuningJobEventsPage, Jobs, + type FineTuningJob, + type FineTuningJobEvent, + type FineTuningJobIntegration, + type FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject, + type JobCreateParams, + type JobListParams, + type JobListEventsParams, } from './jobs'; -export { - FineTuningJobCheckpoint, - CheckpointListParams, - FineTuningJobCheckpointsPage, - Checkpoints, -} from './checkpoints'; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 54b5c4e6a..275fad869 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -3,8 +3,13 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as JobsAPI from './jobs'; import * as CheckpointsAPI from './checkpoints'; +import { + CheckpointListParams, + Checkpoints, + FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage, +} from './checkpoints'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Jobs extends 
APIResource { @@ -445,19 +450,29 @@ export interface JobListParams extends CursorPageParams {} export interface JobListEventsParams extends CursorPageParams {} -export namespace Jobs { - export import FineTuningJob = JobsAPI.FineTuningJob; - export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent; - export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration; - export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration; - export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject; - export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage; - export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage; - export import JobCreateParams = JobsAPI.JobCreateParams; - export import JobListParams = JobsAPI.JobListParams; - export import JobListEventsParams = JobsAPI.JobListEventsParams; - export import Checkpoints = CheckpointsAPI.Checkpoints; - export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint; - export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage; - export import CheckpointListParams = CheckpointsAPI.CheckpointListParams; +Jobs.FineTuningJobsPage = FineTuningJobsPage; +Jobs.FineTuningJobEventsPage = FineTuningJobEventsPage; +Jobs.Checkpoints = Checkpoints; +Jobs.FineTuningJobCheckpointsPage = FineTuningJobCheckpointsPage; + +export declare namespace Jobs { + export { + type FineTuningJob as FineTuningJob, + type FineTuningJobEvent as FineTuningJobEvent, + type FineTuningJobIntegration as FineTuningJobIntegration, + type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, + FineTuningJobsPage as FineTuningJobsPage, + FineTuningJobEventsPage as FineTuningJobEventsPage, + type JobCreateParams as JobCreateParams, + type JobListParams as JobListParams, + type JobListEventsParams as JobListEventsParams, + }; + + export { 
+ Checkpoints as Checkpoints, + type FineTuningJobCheckpoint as FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage as FineTuningJobCheckpointsPage, + type CheckpointListParams as CheckpointListParams, + }; } diff --git a/src/resources/images.ts b/src/resources/images.ts index fdd0b8881..f4d59b941 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ImagesAPI from './images'; export class Images extends APIResource { /** @@ -207,11 +206,13 @@ export interface ImageGenerateParams { user?: string; } -export namespace Images { - export import Image = ImagesAPI.Image; - export import ImageModel = ImagesAPI.ImageModel; - export import ImagesResponse = ImagesAPI.ImagesResponse; - export import ImageCreateVariationParams = ImagesAPI.ImageCreateVariationParams; - export import ImageEditParams = ImagesAPI.ImageEditParams; - export import ImageGenerateParams = ImagesAPI.ImageGenerateParams; +export declare namespace Images { + export { + type Image as Image, + type ImageModel as ImageModel, + type ImagesResponse as ImagesResponse, + type ImageCreateVariationParams as ImageCreateVariationParams, + type ImageEditParams as ImageEditParams, + type ImageGenerateParams as ImageGenerateParams, + }; } diff --git a/src/resources/index.ts b/src/resources/index.ts index 15c5db77f..ad0302357 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -2,62 +2,62 @@ export * from './chat/index'; export * from './shared'; -export { AudioModel, AudioResponseFormat, Audio } from './audio/audio'; +export { Audio, type AudioModel, type AudioResponseFormat } from './audio/audio'; export { - Batch, - BatchError, - BatchRequestCounts, - BatchCreateParams, - BatchListParams, BatchesPage, Batches, + type Batch, + type BatchError, + type BatchRequestCounts, + type BatchCreateParams, + type BatchListParams, } from './batches'; export { Beta } from './beta/beta'; export { 
- Completion, - CompletionChoice, - CompletionUsage, - CompletionCreateParams, - CompletionCreateParamsNonStreaming, - CompletionCreateParamsStreaming, Completions, + type Completion, + type CompletionChoice, + type CompletionUsage, + type CompletionCreateParams, + type CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming, } from './completions'; export { - CreateEmbeddingResponse, - Embedding, - EmbeddingModel, - EmbeddingCreateParams, Embeddings, + type CreateEmbeddingResponse, + type Embedding, + type EmbeddingModel, + type EmbeddingCreateParams, } from './embeddings'; export { - FileContent, - FileDeleted, - FileObject, - FilePurpose, - FileCreateParams, - FileListParams, FileObjectsPage, Files, + type FileContent, + type FileDeleted, + type FileObject, + type FilePurpose, + type FileCreateParams, + type FileListParams, } from './files'; export { FineTuning } from './fine-tuning/fine-tuning'; export { - Image, - ImageModel, - ImagesResponse, - ImageCreateVariationParams, - ImageEditParams, - ImageGenerateParams, Images, + type Image, + type ImageModel, + type ImagesResponse, + type ImageCreateVariationParams, + type ImageEditParams, + type ImageGenerateParams, } from './images'; -export { Model, ModelDeleted, ModelsPage, Models } from './models'; +export { ModelsPage, Models, type Model, type ModelDeleted } from './models'; export { - Moderation, - ModerationImageURLInput, - ModerationModel, - ModerationMultiModalInput, - ModerationTextInput, - ModerationCreateResponse, - ModerationCreateParams, Moderations, + type Moderation, + type ModerationImageURLInput, + type ModerationModel, + type ModerationMultiModalInput, + type ModerationTextInput, + type ModerationCreateResponse, + type ModerationCreateParams, } from './moderations'; -export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads/uploads'; +export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads/uploads'; diff 
--git a/src/resources/models.ts b/src/resources/models.ts index 178915747..6d8cd5296 100644 --- a/src/resources/models.ts +++ b/src/resources/models.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ModelsAPI from './models'; import { Page } from '../pagination'; export class Models extends APIResource { @@ -69,8 +68,8 @@ export interface ModelDeleted { object: string; } -export namespace Models { - export import Model = ModelsAPI.Model; - export import ModelDeleted = ModelsAPI.ModelDeleted; - export import ModelsPage = ModelsAPI.ModelsPage; +Models.ModelsPage = ModelsPage; + +export declare namespace Models { + export { type Model as Model, type ModelDeleted as ModelDeleted, ModelsPage as ModelsPage }; } diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index ba800509e..cdde12a62 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ModerationsAPI from './moderations'; export class Moderations extends APIResource { /** @@ -357,12 +356,14 @@ export interface ModerationCreateParams { model?: (string & {}) | ModerationModel; } -export namespace Moderations { - export import Moderation = ModerationsAPI.Moderation; - export import ModerationImageURLInput = ModerationsAPI.ModerationImageURLInput; - export import ModerationModel = ModerationsAPI.ModerationModel; - export import ModerationMultiModalInput = ModerationsAPI.ModerationMultiModalInput; - export import ModerationTextInput = ModerationsAPI.ModerationTextInput; - export import ModerationCreateResponse = ModerationsAPI.ModerationCreateResponse; - export import ModerationCreateParams = ModerationsAPI.ModerationCreateParams; +export declare namespace Moderations { + export { + type Moderation as Moderation, + type ModerationImageURLInput as ModerationImageURLInput, + type ModerationModel as ModerationModel, + 
type ModerationMultiModalInput as ModerationMultiModalInput, + type ModerationTextInput as ModerationTextInput, + type ModerationCreateResponse as ModerationCreateResponse, + type ModerationCreateParams as ModerationCreateParams, + }; } diff --git a/src/resources/uploads/index.ts b/src/resources/uploads/index.ts index 1a353d312..200d3567e 100644 --- a/src/resources/uploads/index.ts +++ b/src/resources/uploads/index.ts @@ -1,4 +1,4 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads'; -export { UploadPart, PartCreateParams, Parts } from './parts'; +export { Parts, type UploadPart, type PartCreateParams } from './parts'; +export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads'; diff --git a/src/resources/uploads/parts.ts b/src/resources/uploads/parts.ts index a4af5c606..9b54c99e6 100644 --- a/src/resources/uploads/parts.ts +++ b/src/resources/uploads/parts.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as PartsAPI from './parts'; export class Parts extends APIResource { /** @@ -62,7 +61,6 @@ export interface PartCreateParams { data: Core.Uploadable; } -export namespace Parts { - export import UploadPart = PartsAPI.UploadPart; - export import PartCreateParams = PartsAPI.PartCreateParams; +export declare namespace Parts { + export { type UploadPart as UploadPart, type PartCreateParams as PartCreateParams }; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index 1c3ed708d..78fa3a7b5 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -2,9 +2,9 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as UploadsAPI from './uploads'; import * as FilesAPI from '../files'; import * as PartsAPI from './parts'; +import { PartCreateParams, 
Parts, UploadPart } from './parts'; export class Uploads extends APIResource { parts: PartsAPI.Parts = new PartsAPI.Parts(this._client); @@ -159,11 +159,14 @@ export interface UploadCompleteParams { md5?: string; } -export namespace Uploads { - export import Upload = UploadsAPI.Upload; - export import UploadCreateParams = UploadsAPI.UploadCreateParams; - export import UploadCompleteParams = UploadsAPI.UploadCompleteParams; - export import Parts = PartsAPI.Parts; - export import UploadPart = PartsAPI.UploadPart; - export import PartCreateParams = PartsAPI.PartCreateParams; +Uploads.Parts = Parts; + +export declare namespace Uploads { + export { + type Upload as Upload, + type UploadCreateParams as UploadCreateParams, + type UploadCompleteParams as UploadCompleteParams, + }; + + export { Parts as Parts, type UploadPart as UploadPart, type PartCreateParams as PartCreateParams }; } diff --git a/tsconfig.json b/tsconfig.json index 5f99085fc..09a702fca 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -11,7 +11,7 @@ "paths": { "openai/_shims/auto/*": ["src/_shims/auto/*-node"], "openai/*": ["src/*"], - "openai": ["src/index.ts"], + "openai": ["src/index.ts"] }, "noEmit": true, From 362d868426e5777183a52da8df432fa34f722442 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 1 Nov 2024 16:54:12 +0000 Subject: [PATCH 303/533] feat: publish to jsr (#1165) --- .github/workflows/create-releases.yml | 22 +-- .github/workflows/publish-deno.yml | 44 ----- .github/workflows/publish-jsr.yml | 30 ++++ .gitignore | 2 +- README.md | 6 +- bin/publish-jsr | 11 ++ jsr.json | 8 + release-please-config.json | 2 +- scripts/build-deno | 41 +---- scripts/git-publish-deno.sh | 77 --------- scripts/utils/denoify.ts | 226 -------------------------- src/core.ts | 10 +- src/error.ts | 2 +- src/index.ts | 28 ++-- src/streaming.ts | 4 +- tsconfig.deno.json | 11 +- tsconfig.json | 1 + 17 files changed, 88 insertions(+), 437 deletions(-) delete mode 100644 .github/workflows/publish-deno.yml create 
mode 100644 .github/workflows/publish-jsr.yml create mode 100644 bin/publish-jsr create mode 100644 jsr.json delete mode 100755 scripts/git-publish-deno.sh delete mode 100644 scripts/utils/denoify.ts diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index d5ae1f755..3a753b31c 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -22,27 +22,12 @@ jobs: repo: ${{ github.event.repository.full_name }} stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - name: Generate a token - id: generate_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - owner: 'openai' - repositories: 'openai-node,openai-deno-build' - - name: Set up Node if: ${{ steps.release.outputs.releases_created }} uses: actions/setup-node@v3 with: node-version: '18' - - name: Set up Deno - if: ${{ steps.release.outputs.releases_created }} - uses: denoland/setup-deno@v1 - with: - deno-version: v1.35.1 - - name: Install dependencies if: ${{ steps.release.outputs.releases_created }} run: | @@ -55,11 +40,8 @@ jobs: env: NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} - - name: Publish to Deno + - name: Publish to JSR if: ${{ steps.release.outputs.releases_created }} run: | - bash ./scripts/git-publish-deno.sh - env: - DENO_PUSH_REMOTE_URL: https://username:${{ steps.generate_token.outputs.token }}@github.com/openai/openai-deno-build.git - DENO_PUSH_BRANCH: main + bash ./bin/publish-jsr diff --git a/.github/workflows/publish-deno.yml b/.github/workflows/publish-deno.yml deleted file mode 100644 index 894c516a0..000000000 --- a/.github/workflows/publish-deno.yml +++ /dev/null @@ -1,44 +0,0 @@ -# workflow for re-running publishing to Deno in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-deno.yml -name: Publish Deno -on: - 
workflow_dispatch: - -jobs: - publish: - name: publish - runs-on: ubuntu-latest - environment: publish - - steps: - - uses: actions/checkout@v4 - - - name: Generate a token - id: generate_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - owner: 'openai' - repositories: 'openai-node,openai-deno-build' - - - name: Set up Node - uses: actions/setup-node@v3 - with: - node-version: '18' - - - name: Set up Deno - uses: denoland/setup-deno@v1 - with: - deno-version: v1.35.1 - - - name: Install dependencies - run: | - yarn install - - - name: Publish to Deno - run: | - bash ./scripts/git-publish-deno.sh - env: - DENO_PUSH_REMOTE_URL: https://username:${{ steps.generate_token.outputs.token }}@github.com/openai/openai-deno-build.git - DENO_PUSH_BRANCH: main diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml new file mode 100644 index 000000000..1e46d6bfb --- /dev/null +++ b/.github/workflows/publish-jsr.yml @@ -0,0 +1,30 @@ +# workflow for re-running publishing to JSR in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml +name: Publish JSR +on: + workflow_dispatch: + +jobs: + publish: + name: publish + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + environment: publish + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v3 + with: + node-version: '18' + + - name: Install dependencies + run: | + yarn install + + - name: Publish to JSR + run: | + bash ./bin/publish-jsr diff --git a/.gitignore b/.gitignore index 0af7568e5..81c4c41ca 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,7 @@ yarn-error.log codegen.log Brewfile.lock.json dist -/deno +dist-deno /*.tgz .idea/ tmp diff --git a/README.md b/README.md index 776ea4049..caa3f9d4a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # 
OpenAI Node API Library -[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) +[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) This library provides convenient access to the OpenAI REST API from TypeScript or JavaScript. @@ -14,12 +14,12 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo npm install openai ``` -You can import in Deno via: +You can also import from jsr: ```ts -import OpenAI from 'https://deno.land/x/openai@v4.69.0/mod.ts'; +import OpenAI from 'jsr:@openai/openai'; ``` diff --git a/bin/publish-jsr b/bin/publish-jsr new file mode 100644 index 000000000..1b7365087 --- /dev/null +++ b/bin/publish-jsr @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -eux + +# Build the project +yarn build + +# Navigate to the dist directory +cd dist-deno + +npx jsr publish ${JSR_TOKEN:+"--token=$JSR_TOKEN"} diff --git a/jsr.json b/jsr.json new file mode 100644 index 000000000..fefb5b291 --- /dev/null +++ b/jsr.json @@ -0,0 +1,8 @@ +{ + "name": "@openai/openai", + "version": "4.47.1", + "exports": "./index.ts", + "publish": { + "exclude": ["!."] + } +} diff --git a/release-please-config.json b/release-please-config.json index 0a9347796..377a76e99 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -63,6 +63,6 @@ "extra-files": [ "src/version.ts", "README.md", - "scripts/build-deno" + "jsr.json" ] } diff --git a/scripts/build-deno b/scripts/build-deno index be17942df..7d542cf24 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -4,47 +4,16 @@ set -exuo pipefail cd "$(dirname "$0")/.." 
-rm -rf deno; mkdir deno -cp -rp src/* deno +rm -rf dist-deno; mkdir dist-deno +cp -rp src/* jsr.json dist-deno -# x-release-please-start-version -cat << EOF > deno/README.md -# OpenAI Node API Library - Deno build - -This is a build produced from https://github.com/openai/openai-node – please go there to read the source and docs, file issues, etc. - -Usage: - -\`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.69.0/mod.ts"; - -const client = new OpenAI(); -\`\`\` - -Note that in most Deno environments, you can also do this: - -\`\`\`ts -import OpenAI from "npm:openai"; -\`\`\` -EOF -# x-release-please-end - -rm deno/_shims/auto/*-node.ts -for dir in deno/_shims deno/_shims/auto; do +rm dist-deno/_shims/auto/*-node.ts +for dir in dist-deno/_shims dist-deno/_shims/auto; do rm "${dir}"/*.{d.ts,js,mjs} for file in "${dir}"/*-deno.ts; do mv -- "$file" "${file%-deno.ts}.ts" done done for file in LICENSE CHANGELOG.md; do - if [ -e "${file}" ]; then cp "${file}" deno; fi + if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done -npm exec ts-node -T -- scripts/utils/denoify.ts -deno fmt deno -deno check deno/mod.ts -if [ -e deno_tests ]; then - deno test deno_tests --allow-env -fi - -# make sure that nothing crashes when we load the Deno module -(cd deno && deno run mod.ts) diff --git a/scripts/git-publish-deno.sh b/scripts/git-publish-deno.sh deleted file mode 100755 index 701db735e..000000000 --- a/scripts/git-publish-deno.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash - -set -exuo pipefail - -cd "$(dirname "$0")/.." - -# This script pushes the contents of the `deno` directory to the `deno` branch, -# and creates a `vx.x.x-deno` tag, so that Deno users can -# import OpenAI from "https://raw.githubusercontent.com/openai/openai-node/vx.x.x-deno/mod.ts" - -# It's also possible to publish to deno.land. 
You can do this by: -# - Creating a separate GitHub repo -# - Add the deno.land webhook to the repo as described at https://deno.com/add_module -# - Set the following environment variables when running this script: -# - DENO_PUSH_REMOTE_URL - the remote url of the separate GitHub repo -# - DENO_PUSH_BRANCH - the branch you want to push to in that repo (probably `main`) -# - DENO_MAIN_BRANCH - the branch you want as the main branch in that repo (probably `main`, sometimes `master`) -# - DENO_PUSH_VERSION - defaults to version in package.json -# - DENO_PUSH_RELEASE_TAG - defaults to v$DENO_PUSH_VERSION-deno - -die () { - echo >&2 "$@" - exit 1 -} - -# Allow caller to set the following environment variables, but provide defaults -# if unset -# : "${FOO:=bar}" sets FOO=bar unless it's set and non-empty -# https://stackoverflow.com/questions/307503/whats-a-concise-way-to-check-that-environment-variables-are-set-in-a-unix-shell -# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html - -: "${DENO_PUSH_VERSION:=$(node -p 'require("./package.json").version')}" -: "${DENO_PUSH_BRANCH:=deno}" -: "${DENO_MAIN_BRANCH:=main}" -: "${DENO_PUSH_REMOTE_URL:=$(git remote get-url origin)}" -: "${DENO_GIT_USER_NAME:="Stainless Bot"}" -: "${DENO_GIT_USER_EMAIL:="bot@stainlessapi.com"}" -if [[ $DENO_PUSH_BRANCH = "deno" ]]; then - : "${DENO_PUSH_RELEASE_TAG:="v$DENO_PUSH_VERSION-deno"}" -else - : "${DENO_PUSH_RELEASE_TAG:="v$DENO_PUSH_VERSION"}" -fi - -if [ ! -e deno ]; then ./scripts/build; fi - -# We want to commit and push a branch where everything inside the deno -# directory is at root level in the branch. - -# We can do this by temporarily creating a git repository inside deno, -# committing files to the branch, and pushing it to the remote. 
- -cd deno -rm -rf .git -git init -b "$DENO_MAIN_BRANCH" -git remote add origin "$DENO_PUSH_REMOTE_URL" -if git fetch origin "$DENO_PUSH_RELEASE_TAG"; then - die "Tag $DENO_PUSH_RELEASE_TAG already exists" -fi -if git fetch origin "$DENO_PUSH_BRANCH"; then - # the branch already exists on the remote; "check out" the branch without - # changing files in the working directory - git branch "$DENO_PUSH_BRANCH" -t origin/"$DENO_PUSH_BRANCH" - git symbolic-ref HEAD refs/heads/"$DENO_PUSH_BRANCH" - git reset -else - # the branch doesn't exist on the remote yet - git checkout -b "$DENO_PUSH_BRANCH" -fi - -git config user.email "$DENO_GIT_USER_EMAIL" -git config user.name "$DENO_GIT_USER_NAME" - -git add . -git commit -m "chore(deno): release $DENO_PUSH_VERSION" -git tag -a "$DENO_PUSH_RELEASE_TAG" -m "release $DENO_PUSH_VERSION" -git push --tags --set-upstream origin "$DENO_PUSH_BRANCH" -rm -rf .git diff --git a/scripts/utils/denoify.ts b/scripts/utils/denoify.ts deleted file mode 100644 index 52705802a..000000000 --- a/scripts/utils/denoify.ts +++ /dev/null @@ -1,226 +0,0 @@ -import path from 'path'; -import * as tm from 'ts-morph'; -import { name as pkgName } from '../../package.json'; -import fs from 'fs'; - -const rootDir = path.resolve(__dirname, '../..'); -const denoDir = path.join(rootDir, 'deno'); -const tsConfigFilePath = path.join(rootDir, 'tsconfig.deno.json'); - -async function denoify() { - const project = new tm.Project({ tsConfigFilePath }); - - for (const file of project.getSourceFiles()) { - if (!file.getFilePath().startsWith(denoDir + '/')) continue; - - let addedBuffer = false, - addedProcess = false; - file.forEachDescendant((node) => { - switch (node.getKind()) { - case tm.ts.SyntaxKind.ExportDeclaration: { - const decl: tm.ExportDeclaration = node as any; - if (decl.isTypeOnly()) return; - for (const named of decl.getNamedExports()) { - // Convert `export { Foo } from './foo.ts'` - // to `export { type Foo } from './foo.ts'` - // if `./foo.ts` only 
exports types for `Foo` - if (!named.isTypeOnly() && !hasValueDeclarations(named)) { - named.replaceWithText(`type ${named.getText()}`); - } - } - break; - } - case tm.ts.SyntaxKind.ImportEqualsDeclaration: { - const decl: tm.ImportEqualsDeclaration = node as any; - if (decl.isTypeOnly()) return; - - const ref = decl.getModuleReference(); - if (!hasValueDeclarations(ref)) { - const params = isBuiltinType(ref.getType()) ? [] : ref.getType().getTypeArguments(); - if (params.length) { - const paramsStr = params.map((p: tm.TypeParameter) => p.getText()).join(', '); - const bindingsStr = params - .map((p: tm.TypeParameter) => p.getSymbol()?.getName() || p.getText()) - .join(', '); - decl.replaceWithText( - `export type ${decl.getName()}<${paramsStr}> = ${ref.getText()}<${bindingsStr}>`, - ); - } else { - decl.replaceWithText(`export type ${decl.getName()} = ${ref.getText()}`); - } - } - break; - } - case tm.ts.SyntaxKind.Identifier: { - const id = node as tm.Identifier; - if (!addedBuffer && id.getText() === 'Buffer') { - addedBuffer = true; - file?.addVariableStatement({ - declarations: [ - { - name: 'Buffer', - type: 'any', - }, - ], - hasDeclareKeyword: true, - }); - file?.addTypeAlias({ - name: 'Buffer', - type: 'any', - }); - } - if (!addedProcess && id.getText() === 'process') { - addedProcess = true; - file?.addVariableStatement({ - declarations: [ - { - name: 'process', - type: 'any', - }, - ], - hasDeclareKeyword: true, - }); - } - } - } - }); - } - - await project.save(); - - for (const file of project.getSourceFiles()) { - if (!file.getFilePath().startsWith(denoDir + '/')) continue; - for (const decl of [...file.getImportDeclarations(), ...file.getExportDeclarations()]) { - const moduleSpecifier = decl.getModuleSpecifier(); - if (!moduleSpecifier) continue; - let specifier = moduleSpecifier.getLiteralValue().replace(/^node:/, ''); - if (!specifier || specifier.startsWith('http')) continue; - - if (nodeStdModules.has(specifier)) { - // convert node builtins to 
deno.land/std - specifier = `https://deno.land/std@0.177.0/node/${specifier}.ts`; - } else if (specifier.startsWith(pkgName + '/')) { - // convert self-referencing module specifiers to relative paths - specifier = file.getRelativePathAsModuleSpecifierTo(denoDir + specifier.substring(pkgName.length)); - } else if (!decl.isModuleSpecifierRelative()) { - specifier = `npm:${specifier}`; - } - - if (specifier.startsWith('./') || specifier.startsWith('../')) { - // there may be CJS directory module specifiers that implicitly resolve - // to /index.ts. Add an explicit /index.ts to the end - const sourceFile = decl.getModuleSpecifierSourceFile(); - if (sourceFile && /\/index\.ts$/.test(sourceFile.getFilePath()) && !/\/mod\.ts$/.test(specifier)) { - if (/\/index(\.ts)?$/.test(specifier)) { - specifier = specifier.replace(/\/index(\.ts)?$/, '/mod.ts'); - } else { - specifier += '/mod.ts'; - } - } - // add explicit .ts file extensions to relative module specifiers - specifier = specifier.replace(/(\.[^./]*)?$/, '.ts'); - } - moduleSpecifier.replaceWithText(JSON.stringify(specifier)); - } - } - - await project.save(); - - await Promise.all( - project.getSourceFiles().map(async (f) => { - const filePath = f.getFilePath(); - if (filePath.endsWith('index.ts')) { - const newPath = filePath.replace(/index\.ts$/, 'mod.ts'); - await fs.promises.rename(filePath, newPath); - } - }), - ); -} - -const nodeStdModules = new Set([ - 'assert', - 'assertion_error', - 'async_hooks', - 'buffer', - 'child_process', - 'cluster', - 'console', - 'constants', - 'crypto', - 'dgram', - 'diagnostics_channel', - 'dns', - 'domain', - 'events', - 'fs', - 'global', - 'http', - 'http2', - 'https', - 'inspector', - 'module_all', - 'module_esm', - 'module', - 'net', - 'os', - 'path', - 'perf_hooks', - 'process', - 'punycode', - 'querystring', - 'readline', - 'repl', - 'stream', - 'string_decoder', - 'sys', - 'timers', - 'tls', - 'tty', - 'upstream_modules', - 'url', - 'util', - 'v8', - 'vm', - 'wasi', - 
'worker_threads', - 'zlib', -]); - -const typeDeclarationKinds = new Set([ - tm.ts.SyntaxKind.InterfaceDeclaration, - tm.ts.SyntaxKind.ModuleDeclaration, - tm.ts.SyntaxKind.TypeAliasDeclaration, -]); - -const builtinTypeNames = new Set(['Array', 'Set', 'Map', 'Record', 'Promise']); - -function isBuiltinType(type: tm.Type): boolean { - const symbol = type.getSymbol(); - return ( - symbol != null && - builtinTypeNames.has(symbol.getName()) && - symbol.getDeclarations().some((d) => d.getSourceFile().getFilePath().includes('node_modules/typescript')) - ); -} - -function hasValueDeclarations(nodes?: tm.Node): boolean; -function hasValueDeclarations(nodes?: tm.Node[]): boolean; -function hasValueDeclarations(nodes?: tm.Node | tm.Node[]): boolean { - if (nodes && !Array.isArray(nodes)) { - return ( - !isBuiltinType(nodes.getType()) && hasValueDeclarations(nodes.getType().getSymbol()?.getDeclarations()) - ); - } - return nodes ? - nodes.some((n) => { - const parent = n.getParent(); - return ( - !typeDeclarationKinds.has(n.getKind()) && - // sometimes the node will be the right hand side of a type alias - (!parent || !typeDeclarationKinds.has(parent.getKind())) - ); - }) - : false; -} - -denoify(); diff --git a/src/core.ts b/src/core.ts index 9d90178ab..0c8e69ffc 100644 --- a/src/core.ts +++ b/src/core.ts @@ -431,7 +431,7 @@ export abstract class APIClient { error: Object | undefined, message: string | undefined, headers: Headers | undefined, - ) { + ): APIError { return APIError.generate(status, error, message, headers); } @@ -703,9 +703,9 @@ export abstract class AbstractPage implements AsyncIterable { return await this.#client.requestAPIList(this.constructor as any, nextOptions); } - async *iterPages() { + async *iterPages(): AsyncGenerator { // eslint-disable-next-line @typescript-eslint/no-this-alias - let page: AbstractPage = this; + let page: this = this; yield page; while (page.hasNextPage()) { page = await page.getNextPage(); @@ -713,7 +713,7 @@ export abstract 
class AbstractPage implements AsyncIterable { } } - async *[Symbol.asyncIterator]() { + async *[Symbol.asyncIterator](): AsyncGenerator { for await (const page of this.iterPages()) { for (const item of page.getPaginatedItems()) { yield item; @@ -762,7 +762,7 @@ export class PagePromise< * console.log(item) * } */ - async *[Symbol.asyncIterator]() { + async *[Symbol.asyncIterator](): AsyncGenerator { const page = await this; for await (const item of page) { yield item; diff --git a/src/error.ts b/src/error.ts index 87eeea046..72b4f7bfd 100644 --- a/src/error.ts +++ b/src/error.ts @@ -59,7 +59,7 @@ export class APIError extends OpenAIError { errorResponse: Object | undefined, message: string | undefined, headers: Headers | undefined, - ) { + ): APIError { if (!status) { return new APIConnectionError({ message, cause: castToError(errorResponse) }); } diff --git a/src/index.ts b/src/index.ts index c1506997b..33b0848e4 100644 --- a/src/index.ts +++ b/src/index.ts @@ -305,19 +305,21 @@ export class OpenAI extends Core.APIClient { static fileFromPath = Uploads.fileFromPath; } -export const OpenAIError = Errors.OpenAIError; -export const APIError = Errors.APIError; -export const APIConnectionError = Errors.APIConnectionError; -export const APIConnectionTimeoutError = Errors.APIConnectionTimeoutError; -export const APIUserAbortError = Errors.APIUserAbortError; -export const NotFoundError = Errors.NotFoundError; -export const ConflictError = Errors.ConflictError; -export const RateLimitError = Errors.RateLimitError; -export const BadRequestError = Errors.BadRequestError; -export const AuthenticationError = Errors.AuthenticationError; -export const InternalServerError = Errors.InternalServerError; -export const PermissionDeniedError = Errors.PermissionDeniedError; -export const UnprocessableEntityError = Errors.UnprocessableEntityError; +export { + OpenAIError, + APIError, + APIConnectionError, + APIConnectionTimeoutError, + APIUserAbortError, + NotFoundError, + 
ConflictError, + RateLimitError, + BadRequestError, + AuthenticationError, + InternalServerError, + PermissionDeniedError, + UnprocessableEntityError, +} from './error'; export import toFile = Uploads.toFile; export import fileFromPath = Uploads.fileFromPath; diff --git a/src/streaming.ts b/src/streaming.ts index 597ee89fa..b48f3ff1d 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -22,7 +22,7 @@ export class Stream implements AsyncIterable { this.controller = controller; } - static fromSSEResponse(response: Response, controller: AbortController) { + static fromSSEResponse(response: Response, controller: AbortController): Stream { let consumed = false; async function* iterator(): AsyncIterator { @@ -90,7 +90,7 @@ export class Stream implements AsyncIterable { * Generates a Stream from a newline-separated ReadableStream * where each item is a JSON value. */ - static fromReadableStream(readableStream: ReadableStream, controller: AbortController) { + static fromReadableStream(readableStream: ReadableStream, controller: AbortController): Stream { let consumed = false; async function* iterLines(): AsyncGenerator { diff --git a/tsconfig.deno.json b/tsconfig.deno.json index d0e9473d9..849e070db 100644 --- a/tsconfig.deno.json +++ b/tsconfig.deno.json @@ -1,19 +1,14 @@ { "extends": "./tsconfig.json", - "include": ["deno"], + "include": ["dist-deno"], "exclude": [], "compilerOptions": { - "rootDir": "./deno", + "rootDir": "./dist-deno", "lib": ["es2020", "DOM"], - "paths": { - "openai/_shims/auto/*": ["deno/_shims/auto/*-deno"], - "openai/*": ["deno/*"], - "openai": ["deno/index.ts"], - }, "noEmit": true, "declaration": true, "declarationMap": true, - "outDir": "deno", + "outDir": "dist-deno", "pretty": true, "sourceMap": true } diff --git a/tsconfig.json b/tsconfig.json index 09a702fca..33767f7b1 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -32,6 +32,7 @@ "noUncheckedIndexedAccess": true, "noImplicitOverride": true, "noPropertyAccessFromIndexSignature": true, + 
"isolatedModules": true, "skipLibCheck": true } From 1c1417ef15f0f7d718773447e338b429c7871723 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 1 Nov 2024 16:57:31 +0000 Subject: [PATCH 304/533] chore(internal): fix isolated modules exports --- src/resources/beta/chat/completions.ts | 34 ++++++++++++++------------ 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index 03ea0aab5..c9360a95c 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -3,29 +3,14 @@ import * as Core from '../../../core'; import { APIResource } from '../../../resource'; import { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner'; -export { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner'; import { ChatCompletionStreamingRunner, ChatCompletionStreamingFunctionRunnerParams, } from '../../../lib/ChatCompletionStreamingRunner'; -export { - ChatCompletionStreamingRunner, - ChatCompletionStreamingFunctionRunnerParams, -} from '../../../lib/ChatCompletionStreamingRunner'; import { BaseFunctionsArgs } from '../../../lib/RunnableFunction'; -export { - RunnableFunction, - RunnableFunctions, - RunnableFunctionWithParse, - RunnableFunctionWithoutParse, - ParsingFunction, - ParsingToolFunction, -} from '../../../lib/RunnableFunction'; import { RunnerOptions } from '../../../lib/AbstractChatCompletionRunner'; import { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; -export { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; import { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; -export { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; import { ChatCompletionStream, type ChatCompletionStreamParams } from 
'../../../lib/ChatCompletionStream'; import { ChatCompletion, @@ -34,7 +19,26 @@ import { ChatCompletionMessageToolCall, } from '../../chat/completions'; import { ExtractParsedContentFromParams, parseChatCompletion, validateInputTools } from '../../../lib/parser'; + +export { + ChatCompletionStreamingRunner, + type ChatCompletionStreamingFunctionRunnerParams, +} from '../../../lib/ChatCompletionStreamingRunner'; +export { + type RunnableFunction, + type RunnableFunctions, + type RunnableFunctionWithParse, + type RunnableFunctionWithoutParse, + ParsingFunction, + ParsingToolFunction, +} from '../../../lib/RunnableFunction'; +export { type ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; +export { type ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; export { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; +export { + ChatCompletionRunner, + type ChatCompletionFunctionRunnerParams, +} from '../../../lib/ChatCompletionRunner'; export interface ParsedFunction extends ChatCompletionMessageToolCall.Function { parsed_arguments?: unknown; From f5260ff160cec852f58ff92300d473c05b53f02e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 16:58:17 +0000 Subject: [PATCH 305/533] release: 4.70.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 65aac9575..d07bcaba7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.69.0" + ".": "4.70.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b3b52aaa3..3f355b2b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 4.70.0 (2024-11-01) + +Full Changelog: 
[v4.69.0...v4.70.0](https://github.com/openai/openai-node/compare/v4.69.0...v4.70.0) + +### Features + +* publish to jsr ([#1165](https://github.com/openai/openai-node/issues/1165)) ([5aa93a7](https://github.com/openai/openai-node/commit/5aa93a7fe704ef1ad077787852db38dc29104534)) + + +### Chores + +* **internal:** fix isolated modules exports ([9cd1958](https://github.com/openai/openai-node/commit/9cd19584dcc6f4004ea1adcee917aa88a37d5f1c)) + + +### Refactors + +* use type imports for type-only imports ([#1159](https://github.com/openai/openai-node/issues/1159)) ([07bbaf6](https://github.com/openai/openai-node/commit/07bbaf6ecac9a5e36471a35488020853ddf9214f)) + ## 4.69.0 (2024-10-30) Full Changelog: [v4.68.4...v4.69.0](https://github.com/openai/openai-node/compare/v4.68.4...v4.69.0) diff --git a/package.json b/package.json index 9e32feabb..f200fdb53 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.69.0", + "version": "4.70.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index be250f2d6..f298c56c6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.69.0'; // x-release-please-version +export const VERSION = '4.70.0'; // x-release-please-version From 9180285caf1aec6da05aa4a0058db39bd875cb60 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 1 Nov 2024 19:29:35 +0000 Subject: [PATCH 306/533] fix: don't require deno to run build-deno (#1167) --- scripts/build | 2 +- src/streaming.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/build b/scripts/build index b4d686af5..0246c90e3 100755 --- a/scripts/build +++ b/scripts/build @@ -50,7 +50,7 @@ node scripts/utils/postprocess-files.cjs (cd dist && node -e 'require("openai")') (cd dist && node -e 'import("openai")' --input-type=module) -if [ "${OPENAI_DISABLE_DENO_BUILD:-0}" != "1" ] && 
command -v deno &> /dev/null && [ -e ./scripts/build-deno ] +if [ "${OPENAI_DISABLE_DENO_BUILD:-0}" != "1" ] && [ -e ./scripts/build-deno ] then ./scripts/build-deno fi diff --git a/src/streaming.ts b/src/streaming.ts index b48f3ff1d..2891e6ac3 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -2,7 +2,7 @@ import { ReadableStream, type Response } from './_shims/index'; import { OpenAIError } from './error'; import { LineDecoder } from './internal/decoders/line'; -import { APIError } from 'openai/error'; +import { APIError } from './error'; type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; From 9b569856e1f39156cebbb939b7b7149b0f494c88 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:29:56 +0000 Subject: [PATCH 307/533] release: 4.70.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d07bcaba7..f458b24a5 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.70.0" + ".": "4.70.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f355b2b4..7525af900 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.70.1 (2024-11-01) + +Full Changelog: [v4.70.0...v4.70.1](https://github.com/openai/openai-node/compare/v4.70.0...v4.70.1) + +### Bug Fixes + +* don't require deno to run build-deno ([#1167](https://github.com/openai/openai-node/issues/1167)) ([9d857bc](https://github.com/openai/openai-node/commit/9d857bc531a0bb3939f7660e49b31ccc38f60dd3)) + ## 4.70.0 (2024-11-01) Full Changelog: [v4.69.0...v4.70.0](https://github.com/openai/openai-node/compare/v4.69.0...v4.70.0) diff --git a/package.json b/package.json index f200fdb53..7e14e3b3b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { 
"name": "openai", - "version": "4.70.0", + "version": "4.70.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index f298c56c6..654369eef 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.70.0'; // x-release-please-version +export const VERSION = '4.70.1'; // x-release-please-version From a3df48926c506bfe649336adcf14011e20f539b9 Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Fri, 1 Nov 2024 15:36:55 -0400 Subject: [PATCH 308/533] fix: skip deno ecosystem test --- ecosystem-tests/cli.ts | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index c03ea668a..b0ff712f1 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -95,15 +95,16 @@ const projectRunners = { await run('bun', ['test']); } }, - deno: async () => { - // we don't need to explicitly install the package here - // because our deno setup relies on `rootDir/deno` to exist - // which is an artifact produced from our build process - await run('deno', ['task', 'install']); - await run('deno', ['task', 'check']); - - if (state.live) await run('deno', ['task', 'test']); - }, + // Temporarily comment this out until we can test with JSR transformations end-to-end. 
+ // deno: async () => { + // // we don't need to explicitly install the package here + // // because our deno setup relies on `rootDir/deno` to exist + // // which is an artifact produced from our build process + // await run('deno', ['task', 'install']); + // await run('deno', ['task', 'check']); + + // if (state.live) await run('deno', ['task', 'test']); + // }, }; let projectNames = Object.keys(projectRunners) as Array; From dfd4bbe7412bd41622058434f193db4ad1672bbe Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Fri, 1 Nov 2024 15:39:43 -0400 Subject: [PATCH 309/533] fix: add permissions to github workflow --- .github/workflows/create-releases.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index 3a753b31c..19b7dd831 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -12,6 +12,9 @@ jobs: if: github.ref == 'refs/heads/master' && github.repository == 'openai/openai-node' runs-on: ubuntu-latest environment: publish + permissions: + contents: read + id-token: write steps: - uses: actions/checkout@v4 From 53f6ecc9e333a6e9adac2179efecdfe3f2ff6d8a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 1 Nov 2024 19:40:03 +0000 Subject: [PATCH 310/533] release: 4.70.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f458b24a5..0d068338b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.70.1" + ".": "4.70.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7525af900..09e00049c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.70.2 (2024-11-01) + +Full Changelog: 
[v4.70.1...v4.70.2](https://github.com/openai/openai-node/compare/v4.70.1...v4.70.2) + +### Bug Fixes + +* add permissions to github workflow ([ee75e00](https://github.com/openai/openai-node/commit/ee75e00b0fbf82553b219ee8948a8077e9c26a24)) +* skip deno ecosystem test ([5b181b0](https://github.com/openai/openai-node/commit/5b181b01b62139f8da35d426914c82b8425af141)) + ## 4.70.1 (2024-11-01) Full Changelog: [v4.70.0...v4.70.1](https://github.com/openai/openai-node/compare/v4.70.0...v4.70.1) diff --git a/package.json b/package.json index 7e14e3b3b..cd5fbe3f8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.70.1", + "version": "4.70.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 654369eef..f4beff9fa 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.70.1'; // x-release-please-version +export const VERSION = '4.70.2'; // x-release-please-version From 5d6188d685f371b219456f5b5251e0f33cc3fd27 Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Fri, 1 Nov 2024 23:39:30 -0400 Subject: [PATCH 311/533] fix: change streaming helper imports to be relative --- src/lib/AbstractChatCompletionRunner.ts | 10 ++++----- src/lib/AssistantStream.ts | 26 ++++++++++++------------ src/lib/ChatCompletionRunner.ts | 6 +++--- src/lib/ChatCompletionStream.ts | 16 +++++++-------- src/lib/ChatCompletionStreamingRunner.ts | 8 ++++---- src/lib/EventStream.ts | 2 +- src/lib/chatCompletionUtils.ts | 2 +- src/lib/parser.ts | 2 +- 8 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index e943a4e4f..406f5a431 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -1,13 +1,13 @@ -import * as Core from 'openai/core'; -import { type CompletionUsage } from 
'openai/resources/completions'; +import * as Core from '../core'; +import { type CompletionUsage } from '../resources/completions'; import { type ChatCompletion, type ChatCompletionMessage, type ChatCompletionMessageParam, type ChatCompletionCreateParams, type ChatCompletionTool, -} from 'openai/resources/chat/completions'; -import { OpenAIError } from 'openai/error'; +} from '../resources/chat/completions'; +import { OpenAIError } from '../error'; import { type RunnableFunction, isRunnableFunctionWithParse, @@ -23,7 +23,7 @@ import { isAssistantMessage, isFunctionMessage, isToolMessage } from './chatComp import { BaseEvents, EventStream } from './EventStream'; import { ParsedChatCompletion } from '../resources/beta/chat/completions'; import OpenAI from '../index'; -import { isAutoParsableTool, parseChatCompletion } from 'openai/lib/parser'; +import { isAutoParsableTool, parseChatCompletion } from '../lib/parser'; const DEFAULT_MAX_CHAT_COMPLETIONS = 10; export interface RunnerOptions extends Core.RequestOptions { diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index c826c910e..caf68e7dd 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -8,9 +8,9 @@ import { TextDelta, MessageDelta, MessageContent, -} from 'openai/resources/beta/threads/messages'; -import * as Core from 'openai/core'; -import { RequestOptions } from 'openai/core'; +} from '../resources/beta/threads/messages'; +import * as Core from '../core'; +import { RequestOptions } from '../core'; import { Run, RunCreateParamsBase, @@ -18,18 +18,18 @@ import { Runs, RunSubmitToolOutputsParamsBase, RunSubmitToolOutputsParamsStreaming, -} from 'openai/resources/beta/threads/runs/runs'; -import { type ReadableStream } from 'openai/_shims/index'; -import { Stream } from 'openai/streaming'; -import { APIUserAbortError, OpenAIError } from 'openai/error'; +} from '../resources/beta/threads/runs/runs'; +import { type ReadableStream } from '../_shims/index'; +import { 
Stream } from '../streaming'; +import { APIUserAbortError, OpenAIError } from '../error'; import { AssistantStreamEvent, MessageStreamEvent, RunStepStreamEvent, RunStreamEvent, -} from 'openai/resources/beta/assistants'; -import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps'; -import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; +} from '../resources/beta/assistants'; +import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from '../resources/beta/threads/runs/steps'; +import { ThreadCreateAndRunParamsBase, Threads } from '../resources/beta/threads/threads'; import { BaseEvents, EventStream } from './EventStream'; export interface AssistantStreamEvents extends BaseEvents { @@ -192,7 +192,7 @@ export class AssistantStream runs: Runs, params: RunSubmitToolOutputsParamsStream, options: RequestOptions | undefined, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._runToolAssistantStream(threadId, runId, runs, params, { @@ -238,7 +238,7 @@ export class AssistantStream params: ThreadCreateAndRunParamsBaseStream, thread: Threads, options?: RequestOptions, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._threadAssistantStream(params, thread, { @@ -254,7 +254,7 @@ export class AssistantStream runs: Runs, params: RunCreateParamsBaseStream, options?: RequestOptions, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._runAssistantStream(threadId, runs, params, { diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts index 0b962a110..9e68e6671 100644 --- a/src/lib/ChatCompletionRunner.ts +++ b/src/lib/ChatCompletionRunner.ts @@ -1,7 +1,7 @@ import { type ChatCompletionMessageParam, type ChatCompletionCreateParamsNonStreaming, -} from 'openai/resources/chat/completions'; +} from '../resources/chat/completions'; import { type 
RunnableFunctions, type BaseFunctionsArgs, RunnableTools } from './RunnableFunction'; import { AbstractChatCompletionRunner, @@ -9,8 +9,8 @@ import { RunnerOptions, } from './AbstractChatCompletionRunner'; import { isAssistantMessage } from './chatCompletionUtils'; -import OpenAI from 'openai/index'; -import { AutoParseableTool } from 'openai/lib/parser'; +import OpenAI from '../index'; +import { AutoParseableTool } from '../lib/parser'; export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents { content: (content: string) => void; diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index e3661c8c1..a88f8a23b 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -1,10 +1,10 @@ -import * as Core from 'openai/core'; +import * as Core from '../core'; import { OpenAIError, APIUserAbortError, LengthFinishReasonError, ContentFilterFinishReasonError, -} from 'openai/error'; +} from '../error'; import { ChatCompletionTokenLogprob, type ChatCompletion, @@ -12,15 +12,15 @@ import { type ChatCompletionCreateParams, type ChatCompletionCreateParamsStreaming, type ChatCompletionCreateParamsBase, -} from 'openai/resources/chat/completions'; +} from '../resources/chat/completions'; import { AbstractChatCompletionRunner, type AbstractChatCompletionRunnerEvents, } from './AbstractChatCompletionRunner'; -import { type ReadableStream } from 'openai/_shims/index'; -import { Stream } from 'openai/streaming'; -import OpenAI from 'openai/index'; -import { ParsedChatCompletion } from 'openai/resources/beta/chat/completions'; +import { type ReadableStream } from '../_shims/index'; +import { Stream } from '../streaming'; +import OpenAI from '../index'; +import { ParsedChatCompletion } from '../resources/beta/chat/completions'; import { AutoParseableResponseFormat, hasAutoParseableInput, @@ -28,7 +28,7 @@ import { isAutoParsableTool, maybeParseChatCompletion, shouldParseToolCall, -} from 
'openai/lib/parser'; +} from '../lib/parser'; import { partialParse } from '../_vendor/partial-json-parser/parser'; export interface ContentDeltaEvent { diff --git a/src/lib/ChatCompletionStreamingRunner.ts b/src/lib/ChatCompletionStreamingRunner.ts index ea6c74116..ba0c6496f 100644 --- a/src/lib/ChatCompletionStreamingRunner.ts +++ b/src/lib/ChatCompletionStreamingRunner.ts @@ -1,13 +1,13 @@ import { type ChatCompletionChunk, type ChatCompletionCreateParamsStreaming, -} from 'openai/resources/chat/completions'; +} from '../resources/chat/completions'; import { RunnerOptions, type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner'; -import { type ReadableStream } from 'openai/_shims/index'; +import { type ReadableStream } from '../_shims/index'; import { RunnableTools, type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction'; import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream'; -import OpenAI from 'openai/index'; -import { AutoParseableTool } from 'openai/lib/parser'; +import OpenAI from '../index'; +import { AutoParseableTool } from '../lib/parser'; export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents { content: (contentDelta: string, contentSnapshot: string) => void; diff --git a/src/lib/EventStream.ts b/src/lib/EventStream.ts index a18c771dd..d3f485e9d 100644 --- a/src/lib/EventStream.ts +++ b/src/lib/EventStream.ts @@ -1,4 +1,4 @@ -import { APIUserAbortError, OpenAIError } from 'openai/error'; +import { APIUserAbortError, OpenAIError } from '../error'; export class EventStream { controller: AbortController = new AbortController(); diff --git a/src/lib/chatCompletionUtils.ts b/src/lib/chatCompletionUtils.ts index a0d9099de..7e9f8a093 100644 --- a/src/lib/chatCompletionUtils.ts +++ b/src/lib/chatCompletionUtils.ts @@ -3,7 +3,7 @@ import { type ChatCompletionFunctionMessageParam, type ChatCompletionMessageParam, type ChatCompletionToolMessageParam, -} from 
'openai/resources'; +} from '../resources'; export const isAssistantMessage = ( message: ChatCompletionMessageParam | null | undefined, diff --git a/src/lib/parser.ts b/src/lib/parser.ts index 8bf2a3a36..f2678e312 100644 --- a/src/lib/parser.ts +++ b/src/lib/parser.ts @@ -13,7 +13,7 @@ import { ParsedFunctionToolCall, } from '../resources/beta/chat/completions'; import { ResponseFormatJSONSchema } from '../resources/shared'; -import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from 'openai/error'; +import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error'; type AnyChatCompletionCreateParams = | ChatCompletionCreateParams From e0b675f7ee8202a7522be588f4bc297553f5fb3a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 05:07:32 +0000 Subject: [PATCH 312/533] release: 4.70.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0d068338b..6c3b02fed 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.70.2" + ".": "4.70.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 09e00049c..abe273b81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.70.3 (2024-11-04) + +Full Changelog: [v4.70.2...v4.70.3](https://github.com/openai/openai-node/compare/v4.70.2...v4.70.3) + +### Bug Fixes + +* change streaming helper imports to be relative ([e73b7cf](https://github.com/openai/openai-node/commit/e73b7cf84272bd02a39a67795d49db23db2d970f)) + ## 4.70.2 (2024-11-01) Full Changelog: [v4.70.1...v4.70.2](https://github.com/openai/openai-node/compare/v4.70.1...v4.70.2) diff --git a/package.json b/package.json index cd5fbe3f8..e9d130380 100644 --- a/package.json +++ b/package.json @@ 
-1,6 +1,6 @@ { "name": "openai", - "version": "4.70.2", + "version": "4.70.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index f4beff9fa..04f8abf02 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.70.2'; // x-release-please-version +export const VERSION = '4.70.3'; // x-release-please-version From 840179f42eeffc8e533f4b7b2a38e36c593ad8e5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 22:43:08 +0000 Subject: [PATCH 313/533] feat(api): add support for predicted outputs (#1172) --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 2 + src/resources/audio/speech.ts | 4 +- src/resources/audio/transcriptions.ts | 2 +- src/resources/audio/translations.ts | 2 +- src/resources/beta/assistants.ts | 36 +++++++-------- src/resources/beta/threads/messages.ts | 4 +- src/resources/beta/threads/runs/runs.ts | 18 ++++---- src/resources/beta/threads/runs/steps.ts | 8 ++-- src/resources/beta/threads/threads.ts | 10 ++--- .../beta/vector-stores/file-batches.ts | 4 +- src/resources/beta/vector-stores/files.ts | 4 +- .../beta/vector-stores/vector-stores.ts | 4 +- src/resources/chat/chat.ts | 2 + src/resources/chat/completions.ts | 44 +++++++++++++++---- src/resources/chat/index.ts | 1 + src/resources/completions.ts | 24 +++++++--- src/resources/embeddings.ts | 6 +-- src/resources/files.ts | 17 ++++--- src/resources/fine-tuning/jobs/jobs.ts | 2 +- src/resources/images.ts | 6 +-- src/resources/moderations.ts | 2 +- src/resources/uploads/uploads.ts | 2 +- tests/api-resources/chat/completions.test.ts | 1 + tests/api-resources/files.test.ts | 5 ++- 26 files changed, 133 insertions(+), 80 deletions(-) diff --git a/.stats.yml b/.stats.yml index 39413df44..f368bc881 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7b0a5d715d94f75ac7795bd4d2175a0e3243af9b935a86c273f371e45583140f.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml diff --git a/api.md b/api.md index da60f65bd..465730de8 100644 --- a/api.md +++ b/api.md @@ -48,6 +48,7 @@ Types: - ChatCompletionMessageToolCall - ChatCompletionModality - ChatCompletionNamedToolChoice +- ChatCompletionPredictionContent - ChatCompletionRole - ChatCompletionStreamOptions - ChatCompletionSystemMessageParam diff --git a/src/index.ts b/src/index.ts index 33b0848e4..c3299e00d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -87,6 +87,7 @@ import { ChatCompletionMessageToolCall, ChatCompletionModality, ChatCompletionNamedToolChoice, + ChatCompletionPredictionContent, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -379,6 +380,7 @@ export declare namespace OpenAI { type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index da99bf649..1cda80f79 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -22,7 +22,7 @@ export interface SpeechCreateParams { input: string; /** - * One of the available [TTS models](https://platform.openai.com/docs/models/tts): + * One of the available [TTS models](https://platform.openai.com/docs/models#tts): * `tts-1` or `tts-1-hd` */ model: (string & {}) | SpeechModel; @@ -31,7 +31,7 @@ export 
interface SpeechCreateParams { * The voice to use when generating the audio. Supported voices are `alloy`, * `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are * available in the - * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). */ voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index dd4258787..0b6da4620 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -174,7 +174,7 @@ export interface TranscriptionCreateParams< /** * An optional text to guide the model's style or continue a previous audio * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) * should match the audio language. */ prompt?: string; diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index b98a95044..c6bf7c870 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -76,7 +76,7 @@ export interface TranslationCreateParams< /** * An optional text to guide the model's style or continue a previous audio * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) * should be in English. */ prompt?: string; diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 6d48089ce..0e657b1d4 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -121,8 +121,8 @@ export interface Assistant { * ID of the model to use. 
You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: string; @@ -145,8 +145,8 @@ export interface Assistant { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -620,7 +620,7 @@ export namespace AssistantStreamEvent { /** * Occurs when an - * [error](https://platform.openai.com/docs/guides/error-codes/api-errors) occurs. + * [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. * This can happen due to an internal server error or a timeout. */ export interface ErrorEvent { @@ -663,7 +663,7 @@ export namespace FileSearchTool { * * Note that the file search tool may output fewer than `max_num_results` results. * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ max_num_results?: number; @@ -673,7 +673,7 @@ export namespace FileSearchTool { * will use the `auto` ranker and a score_threshold of 0. 
* * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ ranking_options?: FileSearch.RankingOptions; @@ -685,7 +685,7 @@ export namespace FileSearchTool { * will use the `auto` ranker and a score_threshold of 0. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ export interface RankingOptions { @@ -1100,8 +1100,8 @@ export interface AssistantCreateParams { * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: (string & {}) | ChatAPI.ChatModel; @@ -1131,8 +1131,8 @@ export interface AssistantCreateParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1277,8 +1277,8 @@ export interface AssistantUpdateParams { * ID of the model to use. 
You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model?: string; @@ -1289,8 +1289,8 @@ export interface AssistantUpdateParams { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1383,8 +1383,8 @@ export interface AssistantListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index af7977667..8124f56cd 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -704,8 +704,8 @@ export interface MessageListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. 
For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 83a447a91..814ad3e89 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -435,7 +435,7 @@ export interface Run { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls: boolean; @@ -448,8 +448,8 @@ export interface Run { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -660,7 +660,7 @@ export interface RunCreateParamsBase { * search result content. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. 
*/ include?: Array; @@ -721,15 +721,15 @@ export interface RunCreateParamsBase { /** * Body param: Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; /** * Body param: Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -909,8 +909,8 @@ export interface RunListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index b10bcb868..6c6722b62 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -705,7 +705,7 @@ export interface StepRetrieveParams { * to fetch the file search result content. 
* * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ include?: Array; @@ -715,8 +715,8 @@ export interface StepListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; @@ -726,7 +726,7 @@ export interface StepListParams extends CursorPageParams { * to fetch the file search result content. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ include?: Array; diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 899645508..453d8fa10 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -176,8 +176,8 @@ export class Threads extends APIResource { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
* * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -565,15 +565,15 @@ export interface ThreadCreateAndRunParamsBase { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts index 533e6ce03..2c47cb9c2 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/beta/vector-stores/file-batches.ts @@ -276,8 +276,8 @@ export interface FileBatchListFilesParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. 
*/ before?: string; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts index a263a0491..1fda9a99b 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/beta/vector-stores/files.ts @@ -268,8 +268,8 @@ export interface FileListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 4d1e83dce..35ad8c369 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -372,8 +372,8 @@ export interface VectorStoreListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. 
*/ before?: string; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index afe4dd08e..351430f8c 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -23,6 +23,7 @@ import { ChatCompletionMessageToolCall, ChatCompletionModality, ChatCompletionNamedToolChoice, + ChatCompletionPredictionContent, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -101,6 +102,7 @@ export declare namespace Chat { type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 430e52bb2..9d344744a 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -469,7 +469,7 @@ export namespace ChatCompletionContentPartImage { /** * Specifies the detail level of the image. Learn more in the - * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). + * [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). */ detail?: 'auto' | 'low' | 'high'; } @@ -687,6 +687,25 @@ export namespace ChatCompletionNamedToolChoice { } } +/** + * Static predicted output content, such as the content of a text file that is + * being regenerated. + */ +export interface ChatCompletionPredictionContent { + /** + * The content that should be matched when generating a model response. If + * generated tokens would match this content, the entire model response can be + * returned much more quickly. 
+ */ + content: string | Array; + + /** + * The type of the predicted content you want to provide. This type is currently + * always `content`. + */ + type: 'content'; +} + /** * The role of the author of a message */ @@ -855,7 +874,7 @@ export interface ChatCompletionCreateParamsBase { /** * ID of the model to use. See the - * [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) + * [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) * table for details on which models work with the Chat API. */ model: (string & {}) | ChatAPI.ChatModel; @@ -872,7 +891,7 @@ export interface ChatCompletionCreateParamsBase { * existing frequency in the text so far, decreasing the model's likelihood to * repeat the same line verbatim. * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ frequency_penalty?: number | null; @@ -963,25 +982,31 @@ export interface ChatCompletionCreateParamsBase { /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; + /** + * Static predicted output content, such as the content of a text file that is + * being regenerated. + */ + prediction?: ChatCompletionPredictionContent | null; + /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on * whether they appear in the text so far, increasing the model's likelihood to * talk about new topics. 
* - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ presence_penalty?: number | null; /** * An object specifying the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1107,7 +1132,7 @@ export interface ChatCompletionCreateParamsBase { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
*/ user?: string; } @@ -1204,6 +1229,7 @@ export declare namespace Completions { type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index d9366bf74..262bf75a2 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -20,6 +20,7 @@ export { type ChatCompletionMessageToolCall, type ChatCompletionModality, type ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent, type ChatCompletionRole, type ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam, diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 94c4581a1..be75a46f0 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -137,6 +137,12 @@ export namespace CompletionUsage { * Breakdown of tokens used in a completion. */ export interface CompletionTokensDetails { + /** + * When using Predicted Outputs, the number of tokens in the prediction that + * appeared in the completion. + */ + accepted_prediction_tokens?: number; + /** * Audio input tokens generated by the model. */ @@ -146,6 +152,14 @@ export namespace CompletionUsage { * Tokens generated by the model for reasoning. */ reasoning_tokens?: number; + + /** + * When using Predicted Outputs, the number of tokens in the prediction that did + * not appear in the completion. However, like reasoning tokens, these tokens are + * still counted in the total completion tokens for purposes of billing, output, + * and context window limits. 
+ */ + rejected_prediction_tokens?: number; } /** @@ -171,8 +185,8 @@ export interface CompletionCreateParamsBase { * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: (string & {}) | 'gpt-3.5-turbo-instruct' | 'davinci-002' | 'babbage-002'; @@ -209,7 +223,7 @@ export interface CompletionCreateParamsBase { * existing frequency in the text so far, decreasing the model's likelihood to * repeat the same line verbatim. * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ frequency_penalty?: number | null; @@ -264,7 +278,7 @@ export interface CompletionCreateParamsBase { * whether they appear in the text so far, increasing the model's likelihood to * talk about new topics. * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ presence_penalty?: number | null; @@ -327,7 +341,7 @@ export interface CompletionCreateParamsBase { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
*/ user?: string; } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index e2b35f530..4b1644a68 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -94,8 +94,8 @@ export interface EmbeddingCreateParams { * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: (string & {}) | EmbeddingModel; @@ -114,7 +114,7 @@ export interface EmbeddingCreateParams { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } diff --git a/src/resources/files.ts b/src/resources/files.ts index dec815a28..48d8f8747 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -5,7 +5,7 @@ import { isRequestOptions } from '../core'; import { sleep } from '../core'; import { APIConnectionTimeoutError } from '../error'; import * as Core from '../core'; -import { Page } from '../pagination'; +import { CursorPage, type CursorPageParams } from '../pagination'; import { type Response } from '../_shims/index'; export class Files extends APIResource { @@ -44,7 +44,7 @@ export class Files extends APIResource { } /** - * Returns a list of files that belong to the user's organization. + * Returns a list of files. 
*/ list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise; list(options?: Core.RequestOptions): Core.PagePromise; @@ -111,10 +111,7 @@ export class Files extends APIResource { } } -/** - * Note: no pagination actually occurs yet, this is for forwards-compatibility. - */ -export class FileObjectsPage extends Page {} +export class FileObjectsPage extends CursorPage {} export type FileContent = string; @@ -213,7 +210,13 @@ export interface FileCreateParams { purpose: FilePurpose; } -export interface FileListParams { +export interface FileListParams extends CursorPageParams { + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: 'asc' | 'desc'; + /** * Only return files with the given purpose. */ diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 275fad869..0c320e028 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -304,7 +304,7 @@ export interface FineTuningJobWandbIntegrationObject { export interface JobCreateParams { /** * The name of the model to fine-tune. You can select one of the - * [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + * [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). */ model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo' | 'gpt-4o-mini'; diff --git a/src/resources/images.ts b/src/resources/images.ts index f4d59b941..8e1c6d92e 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -94,7 +94,7 @@ export interface ImageCreateVariationParams { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } @@ -146,7 +146,7 @@ export interface ImageEditParams { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } @@ -201,7 +201,7 @@ export interface ImageGenerateParams { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index cdde12a62..f7b16166d 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -351,7 +351,7 @@ export interface ModerationCreateParams { * The content moderation model you would like to use. Learn more in * [the moderation guide](https://platform.openai.com/docs/guides/moderation), and * learn about available models - * [here](https://platform.openai.com/docs/models/moderation). + * [here](https://platform.openai.com/docs/models#moderation). */ model?: (string & {}) | ModerationModel; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index 78fa3a7b5..8491d0fe2 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -25,7 +25,7 @@ export class Uploads extends APIResource { * For certain `purpose`s, the correct `mime_type` must be specified. 
Please refer * to documentation for the supported MIME types for your use case: * - * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) * * For guidance on the proper filename extensions for each purpose, please follow * the documentation on diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 77d4a251c..180a1d77f 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -39,6 +39,7 @@ describe('resource completions', () => { modalities: ['text', 'audio'], n: 1, parallel_tool_calls: true, + prediction: { content: 'string', type: 'content' }, presence_penalty: -2, response_format: { type: 'text' }, seed: -9007199254740991, diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index bbaa45a65..c907c4987 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -69,7 +69,10 @@ describe('resource files', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.files.list({ purpose: 'purpose' }, { path: '/_stainless_unknown_path' }), + client.files.list( + { after: 'after', limit: 0, order: 'asc', purpose: 'purpose' }, + { path: '/_stainless_unknown_path' }, + ), ).rejects.toThrow(OpenAI.NotFoundError); }); From 35cfdb8d400d90403319418bfe345e0d1bd24be5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 22:43:34 +0000 Subject: [PATCH 314/533] release: 4.71.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ package.json | 2 +- src/version.ts | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff 
--git a/.release-please-manifest.json b/.release-please-manifest.json index 6c3b02fed..b295c3f54 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.70.3" + ".": "4.71.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index abe273b81..bb769c53e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.71.0 (2024-11-04) + +Full Changelog: [v4.70.3...v4.71.0](https://github.com/openai/openai-node/compare/v4.70.3...v4.71.0) + +### Features + +* **api:** add support for predicted outputs ([#1172](https://github.com/openai/openai-node/issues/1172)) ([08a7bb4](https://github.com/openai/openai-node/commit/08a7bb4d4b751aeed9655bfcb9fa27fc79a767c4)) + ## 4.70.3 (2024-11-04) Full Changelog: [v4.70.2...v4.70.3](https://github.com/openai/openai-node/compare/v4.70.2...v4.70.3) diff --git a/package.json b/package.json index e9d130380..501d4f31e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.70.3", + "version": "4.71.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 04f8abf02..273878132 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.70.3'; // x-release-please-version +export const VERSION = '4.71.0'; // x-release-please-version From f0a1288d37683e8eee7df6a9e5838fbfee35cbe3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 5 Nov 2024 21:19:10 +0000 Subject: [PATCH 315/533] fix: change release please configuration for jsr.json (#1174) --- release-please-config.json | 6 +++++- scripts/build-deno | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/release-please-config.json b/release-please-config.json index 377a76e99..1aa2fb613 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -63,6 +63,10 
@@ "extra-files": [ "src/version.ts", "README.md", - "jsr.json" + { + "type": "json", + "path": "jsr.json", + "jsonpath": "$.version" + } ] } diff --git a/scripts/build-deno b/scripts/build-deno index 7d542cf24..4a2000a66 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -14,6 +14,6 @@ for dir in dist-deno/_shims dist-deno/_shims/auto; do mv -- "$file" "${file%-deno.ts}.ts" done done -for file in LICENSE CHANGELOG.md; do +for file in README.md LICENSE CHANGELOG.md; do if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done From f41f1811c90b3e3b54a4356c8e2ca39189f4ce66 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 05:06:50 +0000 Subject: [PATCH 316/533] release: 4.71.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 6 ++++-- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b295c3f54..6fbbb03de 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.71.0" + ".": "4.71.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index bb769c53e..1e74a8ee3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.71.1 (2024-11-06) + +Full Changelog: [v4.71.0...v4.71.1](https://github.com/openai/openai-node/compare/v4.71.0...v4.71.1) + +### Bug Fixes + +* change release please configuration for jsr.json ([#1174](https://github.com/openai/openai-node/issues/1174)) ([c39efba](https://github.com/openai/openai-node/commit/c39efba812209c8906315596cc0a56e54ae8590a)) + ## 4.71.0 (2024-11-04) Full Changelog: [v4.70.3...v4.71.0](https://github.com/openai/openai-node/compare/v4.70.3...v4.71.0) diff --git a/jsr.json b/jsr.json index fefb5b291..48a838612 100644 --- a/jsr.json +++ b/jsr.json @@ -1,8 +1,10 @@ { "name": "@openai/openai", - "version": "4.47.1", + "version": "4.71.1", 
"exports": "./index.ts", "publish": { - "exclude": ["!."] + "exclude": [ + "!." + ] } } diff --git a/package.json b/package.json index 501d4f31e..dd3dfba7a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.71.0", + "version": "4.71.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 273878132..3474c77c3 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.71.0'; // x-release-please-version +export const VERSION = '4.71.1'; // x-release-please-version From 4dfb0c6aa7c4530665bc7d6beebcd04aa1490e27 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 20:50:47 +0000 Subject: [PATCH 317/533] chore(ecosystem-tests): bump wrangler version (#1178) Co-authored-by: stainless-bot --- .../cloudflare-worker/package-lock.json | 301 +++++++++++------- .../cloudflare-worker/package.json | 2 +- 2 files changed, 189 insertions(+), 114 deletions(-) diff --git a/ecosystem-tests/cloudflare-worker/package-lock.json b/ecosystem-tests/cloudflare-worker/package-lock.json index 0673bb27c..99d787f75 100644 --- a/ecosystem-tests/cloudflare-worker/package-lock.json +++ b/ecosystem-tests/cloudflare-worker/package-lock.json @@ -17,7 +17,7 @@ "start-server-and-test": "^2.0.0", "ts-jest": "^29.1.0", "typescript": "5.0.4", - "wrangler": "^3.0.0" + "wrangler": "^3.85.0" } }, "node_modules/@ampproject/remapping": { @@ -662,18 +662,21 @@ "dev": true }, "node_modules/@cloudflare/kv-asset-handler": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.2.0.tgz", - "integrity": "sha512-MVbXLbTcAotOPUj0pAMhVtJ+3/kFkwJqc5qNOleOZTv6QkZZABDMS21dSrSlVswEHwrpWC03e4fWytjqKvuE2A==", + "version": "0.3.4", + "resolved": 
"https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.3.4.tgz", + "integrity": "sha512-YLPHc8yASwjNkmcDMQMY35yiWjoKAKnhUbPRszBRS0YgH+IXtsMp61j+yTcnCE3oO2DgP0U3iejLC8FTtKDC8Q==", "dev": true, "dependencies": { "mime": "^3.0.0" + }, + "engines": { + "node": ">=16.13" } }, "node_modules/@cloudflare/workerd-darwin-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20231030.0.tgz", - "integrity": "sha512-J4PQ9utPxLya9yHdMMx3AZeC5M/6FxcoYw6jo9jbDDFTy+a4Gslqf4Im9We3aeOEdPXa3tgQHVQOSelJSZLhIw==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20241022.0.tgz", + "integrity": "sha512-1NNYun37myMTgCUiPQEJ0cMal4mKZVTpkD0b2tx9hV70xji+frVJcSK8YVLeUm1P+Rw1d/ct8DMgQuCpsz3Fsw==", "cpu": [ "x64" ], @@ -687,9 +690,9 @@ } }, "node_modules/@cloudflare/workerd-darwin-arm64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20231030.0.tgz", - "integrity": "sha512-WSJJjm11Del4hSneiNB7wTXGtBXI4QMCH9l5qf4iT5PAW8cESGcCmdHtWDWDtGAAGcvmLT04KNvmum92vRKKQQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20241022.0.tgz", + "integrity": "sha512-FOO/0P0U82EsTLTdweNVgw+4VOk5nghExLPLSppdOziq6IR5HVgP44Kmq5LdsUeHUhwUmfOh9hzaTpkNzUqKvw==", "cpu": [ "arm64" ], @@ -703,9 +706,9 @@ } }, "node_modules/@cloudflare/workerd-linux-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20231030.0.tgz", - "integrity": "sha512-2HUeRTvoCC17fxE0qdBeR7J9dO8j4A8ZbdcvY8pZxdk+zERU6+N03RTbk/dQMU488PwiDvcC3zZqS4gwLfVT8g==", + "version": "1.20241022.0", + "resolved": 
"https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20241022.0.tgz", + "integrity": "sha512-RsNc19BQJG9yd+ngnjuDeG9ywZG+7t1L4JeglgceyY5ViMNMKVO7Zpbsu69kXslU9h6xyQG+lrmclg3cBpnhYA==", "cpu": [ "x64" ], @@ -719,9 +722,9 @@ } }, "node_modules/@cloudflare/workerd-linux-arm64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20231030.0.tgz", - "integrity": "sha512-4/GK5zHh+9JbUI6Z5xTCM0ZmpKKHk7vu9thmHjUxtz+o8Ne9DoD7DlDvXQWgMF6XGaTubDWyp3ttn+Qv8jDFuQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20241022.0.tgz", + "integrity": "sha512-x5mUXpKxfsosxcFmcq5DaqLs37PejHYVRsNz1cWI59ma7aC4y4Qn6Tf3i0r9MwQTF/MccP4SjVslMU6m4W7IaA==", "cpu": [ "arm64" ], @@ -735,9 +738,9 @@ } }, "node_modules/@cloudflare/workerd-windows-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20231030.0.tgz", - "integrity": "sha512-fb/Jgj8Yqy3PO1jLhk7mTrHMkR8jklpbQFud6rL/aMAn5d6MQbaSrYOCjzkKGp0Zng8D2LIzSl+Fc0C9Sggxjg==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20241022.0.tgz", + "integrity": "sha512-eBCClx4szCOgKqOlxxbdNszMqQf3MRG1B9BRIqEM/diDfdR9IrZ8l3FaEm+l9gXgPmS6m1NBn40aWuGBl8UTSw==", "cpu": [ "x64" ], @@ -750,12 +753,47 @@ "node": ">=16" } }, + "node_modules/@cloudflare/workers-shared": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workers-shared/-/workers-shared-0.7.0.tgz", + "integrity": "sha512-LLQRTqx7lKC7o2eCYMpyc5FXV8d0pUX6r3A+agzhqS9aoR5A6zCPefwQGcvbKx83ozX22ATZcemwxQXn12UofQ==", + "dev": true, + "dependencies": { + "mime": "^3.0.0", + "zod": "^3.22.3" + }, + "engines": { + "node": ">=16.7.0" + } + }, "node_modules/@cloudflare/workers-types": { - "version": 
"4.20230821.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20230821.0.tgz", - "integrity": "sha512-lVQSyr5E4CEkQw7WIdsrMTj+kHjsm28mJ0B5AhNFByKR+16KTFsU/RW/nGLKHHW2jxT5lvYI+HjNQMzC9QR8Ng==", + "version": "4.20241106.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20241106.0.tgz", + "integrity": "sha512-pI4ivacmp+vgNO/siHDsZ6BdITR0LC4Mh/1+yzVLcl9U75pt5DUDCOWOiqIRFXRq6H65DPnJbEPFo3x9UfgofQ==", "dev": true }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, "node_modules/@esbuild-plugins/node-globals-polyfill": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-globals-polyfill/-/node-globals-polyfill-0.2.3.tgz", @@ -1142,6 +1180,15 @@ "node": ">=12" } }, + "node_modules/@fastify/busboy": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.1.tgz", + "integrity": "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==", + "dev": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@hapi/hoek": { "version": "9.3.0", 
"resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -1655,9 +1702,9 @@ "dev": true }, "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "version": "8.14.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", + "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -1667,10 +1714,13 @@ } }, "node_modules/acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, "engines": { "node": ">=0.4.0" } @@ -1983,18 +2033,6 @@ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", "dev": true }, - "node_modules/busboy": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", - "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", - "dev": true, - "dependencies": { - "streamsearch": "^1.1.0" - }, - "engines": { - "node": ">=10.16.0" - } - }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -2198,9 +2236,9 @@ "dev": true }, "node_modules/cookie": { - "version": "0.5.0", - "resolved": 
"https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", "dev": true, "engines": { "node": ">= 0.6" @@ -2249,6 +2287,16 @@ "node": ">= 12" } }, + "node_modules/date-fns": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-4.1.0.tgz", + "integrity": "sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==", + "dev": true, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/kossnocorp" + } + }, "node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", @@ -2289,6 +2337,12 @@ "node": ">=0.10.0" } }, + "node_modules/defu": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/defu/-/defu-6.1.4.tgz", + "integrity": "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==", + "dev": true + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -3038,6 +3092,12 @@ "node": ">=8" } }, + "node_modules/itty-time": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/itty-time/-/itty-time-1.0.6.tgz", + "integrity": "sha512-+P8IZaLLBtFv8hCkIjcymZOp4UJ+xW6bSlQsXGqrkmJh7vSiMFSlNne0mCYagEE0N7HDNR5jJBRxwN0oYv61Rw==", + "dev": true + }, "node_modules/jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", @@ -3894,23 +3954,23 @@ } }, "node_modules/miniflare": { - "version": "3.20231030.3", - "resolved": 
"https://registry.npmjs.org/miniflare/-/miniflare-3.20231030.3.tgz", - "integrity": "sha512-lquHSh0XiO8uoWDujOLHtDS9mkUTJTc5C5amiQ6A++5y0f+DWiMqbDBvvwjlYf4Dvqk6ChFya9dztk7fg2ZVxA==", + "version": "3.20241022.0", + "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-3.20241022.0.tgz", + "integrity": "sha512-x9Fbq1Hmz1f0osIT9Qmj78iX4UpCP2EqlZnA/tzj/3+I49vc3Kq0fNqSSKplcdf6HlCHdL3fOBicmreQF4BUUQ==", "dev": true, "dependencies": { + "@cspotcode/source-map-support": "0.8.1", "acorn": "^8.8.0", "acorn-walk": "^8.2.0", "capnp-ts": "^0.7.0", "exit-hook": "^2.2.1", "glob-to-regexp": "^0.4.1", - "source-map-support": "0.5.21", "stoppable": "^1.1.0", - "undici": "^5.22.1", - "workerd": "1.20231030.0", - "ws": "^8.11.0", + "undici": "^5.28.4", + "workerd": "1.20241022.0", + "ws": "^8.17.1", "youch": "^3.2.2", - "zod": "^3.20.6" + "zod": "^3.22.3" }, "bin": { "miniflare": "bootstrap.js" @@ -3919,16 +3979,6 @@ "node": ">=16.13" } }, - "node_modules/miniflare/node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -4066,6 +4116,12 @@ "node": ">=8" } }, + "node_modules/ohash": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/ohash/-/ohash-1.1.4.tgz", + "integrity": "sha512-FlDryZAahJmEF3VR3w1KogSEdWX3WhA5GPakFx4J81kEAiHyLMpdLLElS8n8dfNadMgAne/MywcvmogzscVt4g==", + "dev": true + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -4193,9 +4249,15 @@ "dev": true }, "node_modules/path-to-regexp": { - "version": "6.2.1", 
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", - "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==", + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + "dev": true + }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", "dev": true }, "node_modules/pause-stream": { @@ -4613,15 +4675,6 @@ "duplexer": "~0.1.1" } }, - "node_modules/streamsearch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", - "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", - "dev": true, - "engines": { - "node": ">=10.0.0" - } - }, "node_modules/string-length": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", @@ -4878,18 +4931,37 @@ "node": ">=12.20" } }, + "node_modules/ufo": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.4.tgz", + "integrity": "sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==", + "dev": true + }, "node_modules/undici": { - "version": "5.23.0", - "resolved": "https://registry.npmjs.org/undici/-/undici-5.23.0.tgz", - "integrity": "sha512-1D7w+fvRsqlQ9GscLBwcAJinqcZGHUKjbOmXdlE/v8BvEGXjeWAax+341q44EuTcHXXnfyKNbKRq4Lg7OzhMmg==", + "version": "5.28.4", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.28.4.tgz", + "integrity": 
"sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==", "dev": true, "dependencies": { - "busboy": "^1.6.0" + "@fastify/busboy": "^2.0.0" }, "engines": { "node": ">=14.0" } }, + "node_modules/unenv": { + "name": "unenv-nightly", + "version": "2.0.0-20241024-111401-d4156ac", + "resolved": "https://registry.npmjs.org/unenv-nightly/-/unenv-nightly-2.0.0-20241024-111401-d4156ac.tgz", + "integrity": "sha512-xJO1hfY+Te+/XnfCYrCbFbRcgu6XEODND1s5wnVbaBCkuQX7JXF7fHEXPrukFE2j8EOH848P8QN19VO47XN8hw==", + "dev": true, + "dependencies": { + "defu": "^6.1.4", + "ohash": "^1.1.4", + "pathe": "^1.1.2", + "ufo": "^1.5.4" + } + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", @@ -4986,9 +5058,9 @@ } }, "node_modules/workerd": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20231030.0.tgz", - "integrity": "sha512-+FSW+d31f8RrjHanFf/R9A+Z0csf3OtsvzdPmAKuwuZm/5HrBv83cvG9fFeTxl7/nI6irUUXIRF9xcj/NomQzQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20241022.0.tgz", + "integrity": "sha512-jyGXsgO9DRcJyx6Ovv7gUyDPc3UYC2i/E0p9GFUg6GUzpldw4Y93y9kOmdfsOnKZ3+lY53veSiUniiBPE6Q2NQ==", "dev": true, "hasInstallScript": true, "bin": { @@ -4998,32 +5070,37 @@ "node": ">=16" }, "optionalDependencies": { - "@cloudflare/workerd-darwin-64": "1.20231030.0", - "@cloudflare/workerd-darwin-arm64": "1.20231030.0", - "@cloudflare/workerd-linux-64": "1.20231030.0", - "@cloudflare/workerd-linux-arm64": "1.20231030.0", - "@cloudflare/workerd-windows-64": "1.20231030.0" + "@cloudflare/workerd-darwin-64": "1.20241022.0", + "@cloudflare/workerd-darwin-arm64": "1.20241022.0", + "@cloudflare/workerd-linux-64": "1.20241022.0", + "@cloudflare/workerd-linux-arm64": "1.20241022.0", + "@cloudflare/workerd-windows-64": "1.20241022.0" } }, 
"node_modules/wrangler": { - "version": "3.19.0", - "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.19.0.tgz", - "integrity": "sha512-pY7xWqkQn6DJ+1vz9YHz2pCftEmK+JCTj9sqnucp0NZnlUiILDmBWegsjjCLZycgfiA62J213N7NvjLPr2LB8w==", + "version": "3.85.0", + "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.85.0.tgz", + "integrity": "sha512-r5YCWUaF4ApLnloNE6jHHgRYdFzYHoajTlC1tns42UzQ2Ls63VAqD3b0cxOqzDUfmlSb3skpmu0B0Ssi3QWPAg==", "dev": true, "dependencies": { - "@cloudflare/kv-asset-handler": "^0.2.0", + "@cloudflare/kv-asset-handler": "0.3.4", + "@cloudflare/workers-shared": "0.7.0", "@esbuild-plugins/node-globals-polyfill": "^0.2.3", "@esbuild-plugins/node-modules-polyfill": "^0.2.2", "blake3-wasm": "^2.1.5", "chokidar": "^3.5.3", + "date-fns": "^4.1.0", "esbuild": "0.17.19", - "miniflare": "3.20231030.3", + "itty-time": "^1.0.6", + "miniflare": "3.20241022.0", "nanoid": "^3.3.3", - "path-to-regexp": "^6.2.0", + "path-to-regexp": "^6.3.0", + "resolve": "^1.22.8", "resolve.exports": "^2.0.2", "selfsigned": "^2.0.1", - "source-map": "0.6.1", - "source-map-support": "0.5.21", + "source-map": "^0.6.1", + "unenv": "npm:unenv-nightly@2.0.0-20241024-111401-d4156ac", + "workerd": "1.20241022.0", "xxhash-wasm": "^1.0.1" }, "bin": { @@ -5035,16 +5112,14 @@ }, "optionalDependencies": { "fsevents": "~2.3.2" - } - }, - "node_modules/wrangler/node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" + }, + "peerDependencies": { + "@cloudflare/workers-types": "^4.20241022.0" + }, + "peerDependenciesMeta": { + "@cloudflare/workers-types": { + "optional": true + } } }, "node_modules/wrap-ansi": { @@ -5084,9 +5159,9 @@ } }, "node_modules/ws": { 
- "version": "8.13.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", - "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", "dev": true, "engines": { "node": ">=10.0.0" @@ -5165,20 +5240,20 @@ } }, "node_modules/youch": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/youch/-/youch-3.2.3.tgz", - "integrity": "sha512-ZBcWz/uzZaQVdCvfV4uk616Bbpf2ee+F/AvuKDR5EwX/Y4v06xWdtMluqTD7+KlZdM93lLm9gMZYo0sKBS0pgw==", + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/youch/-/youch-3.3.4.tgz", + "integrity": "sha512-UeVBXie8cA35DS6+nBkls68xaBBXCye0CNznrhszZjTbRVnJKQuNsyLKBTTL4ln1o1rh2PKtv35twV7irj5SEg==", "dev": true, "dependencies": { - "cookie": "^0.5.0", + "cookie": "^0.7.1", "mustache": "^4.2.0", "stacktracey": "^2.1.8" } }, "node_modules/zod": { - "version": "3.22.2", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.22.2.tgz", - "integrity": "sha512-wvWkphh5WQsJbVk1tbx1l1Ly4yg+XecD+Mq280uBGt9wa5BKSWf4Mhp6GmrkPixhMxmabYY7RbzlwVP32pbGCg==", + "version": "3.23.8", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz", + "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==", "dev": true, "funding": { "url": "https://github.com/sponsors/colinhacks" diff --git a/ecosystem-tests/cloudflare-worker/package.json b/ecosystem-tests/cloudflare-worker/package.json index 463de4045..3034e97f7 100644 --- a/ecosystem-tests/cloudflare-worker/package.json +++ b/ecosystem-tests/cloudflare-worker/package.json @@ -17,7 +17,7 @@ "start-server-and-test": "^2.0.0", "ts-jest": "^29.1.0", "typescript": "5.0.4", - "wrangler": "^3.0.0" + "wrangler": "^3.85.0" }, 
"dependencies": { "node-fetch": "^3.3.1" From 34306573a15a03a1e84177aa2f74d8e63adc0bf0 Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Mon, 11 Nov 2024 17:43:09 -0500 Subject: [PATCH 318/533] feat: add back deno runtime testing without type checks --- ecosystem-tests/cli.ts | 18 ++++++++---------- ecosystem-tests/deno/deno.jsonc | 7 +++---- scripts/build-deno | 18 +++++++++++++----- 3 files changed, 24 insertions(+), 19 deletions(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index b0ff712f1..4803b47c2 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -95,16 +95,14 @@ const projectRunners = { await run('bun', ['test']); } }, - // Temporarily comment this out until we can test with JSR transformations end-to-end. - // deno: async () => { - // // we don't need to explicitly install the package here - // // because our deno setup relies on `rootDir/deno` to exist - // // which is an artifact produced from our build process - // await run('deno', ['task', 'install']); - // await run('deno', ['task', 'check']); - - // if (state.live) await run('deno', ['task', 'test']); - // }, + deno: async () => { + // we don't need to explicitly install the package here + // because our deno setup relies on `rootDir/dist-deno` to exist + // which is an artifact produced from our build process + await run('deno', ['task', 'install', '--unstable-sloppy-imports']); + + if (state.live) await run('deno', ['task', 'test']); + }, }; let projectNames = Object.keys(projectRunners) as Array; diff --git a/ecosystem-tests/deno/deno.jsonc b/ecosystem-tests/deno/deno.jsonc index 7de05f2ba..46d7ee486 100644 --- a/ecosystem-tests/deno/deno.jsonc +++ b/ecosystem-tests/deno/deno.jsonc @@ -1,11 +1,10 @@ { "tasks": { "install": "deno install --node-modules-dir main_test.ts -f", - "check": "deno lint && deno check main_test.ts", - "test": "deno test --allow-env --allow-net --allow-read --node-modules-dir" + "test": "deno test --allow-env --allow-net --allow-read 
--node-modules-dir --unstable-sloppy-imports --no-check" }, "imports": { - "openai": "../../deno/mod.ts", - "openai/": "../../deno/" + "openai": "../../dist-deno/index.ts", + "openai/": "../../dist-deno/" } } diff --git a/scripts/build-deno b/scripts/build-deno index 4a2000a66..dfce83548 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -7,13 +7,21 @@ cd "$(dirname "$0")/.." rm -rf dist-deno; mkdir dist-deno cp -rp src/* jsr.json dist-deno +rm -rf dist-deno/shims + +rm dist-deno/_shims/node*.{js,mjs,ts} +rm dist-deno/_shims/manual*.{js,mjs,ts} +rm dist-deno/_shims/index.{d.ts,js,mjs} +for file in dist-deno/_shims/*-deno.ts; do + mv -- "$file" "${file%-deno.ts}.ts" +done + rm dist-deno/_shims/auto/*-node.ts -for dir in dist-deno/_shims dist-deno/_shims/auto; do - rm "${dir}"/*.{d.ts,js,mjs} - for file in "${dir}"/*-deno.ts; do - mv -- "$file" "${file%-deno.ts}.ts" - done +rm dist-deno/_shims/auto/*.{d.ts,js,mjs} +for file in dist-deno/_shims/auto/*-deno.ts; do + mv -- "$file" "${file%-deno.ts}.ts" done + for file in README.md LICENSE CHANGELOG.md; do if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done From a92cc1dbc4ab3284c6654d69d5c39399a867f601 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:00:36 +0000 Subject: [PATCH 319/533] release: 4.72.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6fbbb03de..e53c9dd88 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.71.1" + ".": "4.72.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e74a8ee3..951ef0784 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.72.0 (2024-11-12) + +Full Changelog: 
[v4.71.1...v4.72.0](https://github.com/openai/openai-node/compare/v4.71.1...v4.72.0) + +### Features + +* add back deno runtime testing without type checks ([1626cf5](https://github.com/openai/openai-node/commit/1626cf57e94706e1fc8b2f9ff4f173fe486d5150)) + + +### Chores + +* **ecosystem-tests:** bump wrangler version ([#1178](https://github.com/openai/openai-node/issues/1178)) ([4dfb0c6](https://github.com/openai/openai-node/commit/4dfb0c6aa7c4530665bc7d6beebcd04aa1490e27)) + ## 4.71.1 (2024-11-06) Full Changelog: [v4.71.0...v4.71.1](https://github.com/openai/openai-node/compare/v4.71.0...v4.71.1) diff --git a/jsr.json b/jsr.json index 48a838612..ad1751852 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.71.1", + "version": "4.72.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index dd3dfba7a..85fbed4f1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.71.1", + "version": "4.72.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 3474c77c3..cad6e2320 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.71.1'; // x-release-please-version +export const VERSION = '4.72.0'; // x-release-please-version From f555dd6503bc4ccd4d13f4e1a1d36fbbfd51c369 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 12 Nov 2024 16:47:12 +0000 Subject: [PATCH 320/533] chore(internal): use reexports not destructuring (#1181) --- src/index.ts | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/src/index.ts b/src/index.ts index c3299e00d..8e7e7804e 100644 --- a/src/index.ts +++ b/src/index.ts @@ -306,25 +306,6 @@ export class OpenAI extends Core.APIClient { static fileFromPath = Uploads.fileFromPath; } -export { - OpenAIError, - APIError, - 
APIConnectionError, - APIConnectionTimeoutError, - APIUserAbortError, - NotFoundError, - ConflictError, - RateLimitError, - BadRequestError, - AuthenticationError, - InternalServerError, - PermissionDeniedError, - UnprocessableEntityError, -} from './error'; - -export import toFile = Uploads.toFile; -export import fileFromPath = Uploads.fileFromPath; - OpenAI.Completions = Completions; OpenAI.Chat = Chat; OpenAI.Embeddings = Embeddings; @@ -340,7 +321,6 @@ OpenAI.Beta = Beta; OpenAI.Batches = Batches; OpenAI.BatchesPage = BatchesPage; OpenAI.Uploads = UploadsAPIUploads; - export declare namespace OpenAI { export type RequestOptions = Core.RequestOptions; @@ -664,4 +644,21 @@ const API_KEY_SENTINEL = ''; // ---------------------- End Azure ---------------------- +export { toFile, fileFromPath } from 'openai/uploads'; +export { + OpenAIError, + APIError, + APIConnectionError, + APIConnectionTimeoutError, + APIUserAbortError, + NotFoundError, + ConflictError, + RateLimitError, + BadRequestError, + AuthenticationError, + InternalServerError, + PermissionDeniedError, + UnprocessableEntityError, +} from 'openai/error'; + export default OpenAI; From 4ec402790cf3cfbccbf3ef9b61d577b0118977e8 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Tue, 12 Nov 2024 16:48:27 +0000 Subject: [PATCH 321/533] docs: bump models in example snippets to gpt-4o (#1184) --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index caa3f9d4a..8d30be928 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ const client = new OpenAI({ async function main() { const chatCompletion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-3.5-turbo', + model: 'gpt-4o', }); } @@ -57,7 +57,7 @@ const client = new OpenAI(); async function main() { const stream = await client.chat.completions.create({ - model: 'gpt-4', + model: 'gpt-4o', messages: [{ role: 'user', content: 'Say 
this is a test' }], stream: true, }); @@ -87,7 +87,7 @@ const client = new OpenAI({ async function main() { const params: OpenAI.Chat.ChatCompletionCreateParams = { messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-3.5-turbo', + model: 'gpt-4o', }; const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); } @@ -333,7 +333,7 @@ a subclass of `APIError` will be thrown: ```ts async function main() { const job = await client.fineTuning.jobs - .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' }) + .create({ model: 'gpt-4o', training_file: 'file-abc123' }) .catch(async (err) => { if (err instanceof OpenAI.APIError) { console.log(err.status); // 400 @@ -415,7 +415,7 @@ const client = new OpenAI({ }); // Or, configure per-request: -await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, { +await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in JavaScript?' }], model: 'gpt-4o' }, { maxRetries: 5, }); ``` @@ -432,7 +432,7 @@ const client = new OpenAI({ }); // Override per-request: -await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, { +await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' 
}], model: 'gpt-4o' }, { timeout: 5 * 1000, }); ``` @@ -485,13 +485,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi const client = new OpenAI(); const response = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) + .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) .asResponse(); console.log(response.headers.get('X-My-Header')); console.log(response.statusText); // access the underlying Response object const { data: chatCompletion, response: raw } = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) + .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); console.log(chatCompletion); From 524b9e82ae13a3b5093dcfbfd1169a798cf99ab4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 14 Nov 2024 19:59:44 +0000 Subject: [PATCH 322/533] fix(docs): add missing await to pagination example (#1190) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8d30be928..2f05654b4 100644 --- a/README.md +++ b/README.md @@ -467,7 +467,7 @@ for (const fineTuningJob of page.data) { // Convenience methods are provided for manually paginating: while (page.hasNextPage()) { - page = page.getNextPage(); + page = await page.getNextPage(); // ... 
} ``` From 8ee6c0335673f2ecf84ea11bdfc990adab607e20 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 15 Nov 2024 08:31:55 +0000 Subject: [PATCH 323/533] chore(client): drop unused devDependency (#1191) --- package.json | 1 - src/index.ts | 4 ++-- yarn.lock | 40 ---------------------------------------- 3 files changed, 2 insertions(+), 43 deletions(-) diff --git a/package.json b/package.json index 85fbed4f1..8a61d468f 100644 --- a/package.json +++ b/package.json @@ -47,7 +47,6 @@ "prettier": "^3.0.0", "prettier-2": "npm:prettier@^2", "ts-jest": "^29.1.0", - "ts-morph": "^19.0.0", "ts-node": "^10.5.0", "tsc-multi": "^1.1.0", "tsconfig-paths": "^4.0.0", diff --git a/src/index.ts b/src/index.ts index 8e7e7804e..58d7410e4 100644 --- a/src/index.ts +++ b/src/index.ts @@ -644,7 +644,7 @@ const API_KEY_SENTINEL = ''; // ---------------------- End Azure ---------------------- -export { toFile, fileFromPath } from 'openai/uploads'; +export { toFile, fileFromPath } from './uploads'; export { OpenAIError, APIError, @@ -659,6 +659,6 @@ export { InternalServerError, PermissionDeniedError, UnprocessableEntityError, -} from 'openai/error'; +} from './error'; export default OpenAI; diff --git a/yarn.lock b/yarn.lock index 91b22b941..e139e1fbe 100644 --- a/yarn.lock +++ b/yarn.lock @@ -759,16 +759,6 @@ dependencies: "@swc/counter" "^0.1.3" -"@ts-morph/common@~0.20.0": - version "0.20.0" - resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.20.0.tgz#3f161996b085ba4519731e4d24c35f6cba5b80af" - integrity sha512-7uKjByfbPpwuzkstL3L5MQyuXPSKdoNG93Fmi2JoDcTf3pEP731JdRFAduRVkOs8oqxPsXKA+ScrWkdQ8t/I+Q== - dependencies: - fast-glob "^3.2.12" - minimatch "^7.4.3" - mkdirp "^2.1.6" - path-browserify "^1.0.1" - "@tsconfig/node10@^1.0.7": version "1.0.8" resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.8.tgz#c1e4e80d6f964fbecb3359c43bd48b40f7cadad9" @@ -1315,11 +1305,6 @@ co@^4.6.0: resolved 
"https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" integrity sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ== -code-block-writer@^12.0.0: - version "12.0.0" - resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-12.0.0.tgz#4dd58946eb4234105aff7f0035977b2afdc2a770" - integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w== - collect-v8-coverage@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz#c0b29bcd33bcd0779a1344c2136051e6afd3d9e9" @@ -2687,23 +2672,11 @@ minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: dependencies: brace-expansion "^1.1.7" -minimatch@^7.4.3: - version "7.4.6" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-7.4.6.tgz#845d6f254d8f4a5e4fd6baf44d5f10c8448365fb" - integrity sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw== - dependencies: - brace-expansion "^2.0.1" - minimist@^1.2.6: version "1.2.6" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== -mkdirp@^2.1.6: - version "2.1.6" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-2.1.6.tgz#964fbcb12b2d8c5d6fbc62a963ac95a273e2cc19" - integrity sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A== - ms@2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" @@ -2867,11 +2840,6 @@ parse-json@^5.2.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -path-browserify@^1.0.1: - version "1.0.1" - resolved 
"https://registry.yarnpkg.com/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd" - integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g== - path-exists@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" @@ -3300,14 +3268,6 @@ ts-jest@^29.1.0: semver "^7.5.3" yargs-parser "^21.0.1" -ts-morph@^19.0.0: - version "19.0.0" - resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-19.0.0.tgz#43e95fb0156c3fe3c77c814ac26b7d0be2f93169" - integrity sha512-D6qcpiJdn46tUqV45vr5UGM2dnIEuTGNxVhg0sk5NX11orcouwj6i1bMqZIz2mZTZB1Hcgy7C3oEVhAT+f6mbQ== - dependencies: - "@ts-morph/common" "~0.20.0" - code-block-writer "^12.0.0" - ts-node@^10.5.0: version "10.7.0" resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.7.0.tgz#35d503d0fab3e2baa672a0e94f4b40653c2463f5" From 12f93346857196b93f94865cc3744d769e5e519c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 10:42:10 +0000 Subject: [PATCH 324/533] chore(internal): spec update (#1195) --- .stats.yml | 2 +- .../audio/transcriptions.test.ts | 2 +- tests/api-resources/beta/assistants.test.ts | 8 +- .../beta/threads/messages.test.ts | 15 +- .../beta/threads/runs/runs.test.ts | 99 +-------- .../beta/threads/threads.test.ts | 198 +----------------- tests/api-resources/chat/completions.test.ts | 10 +- .../fine-tuning/jobs/jobs.test.ts | 25 +-- tests/api-resources/uploads/uploads.test.ts | 9 +- 9 files changed, 22 insertions(+), 346 deletions(-) diff --git a/.stats.yml b/.stats.yml index f368bc881..fdef8d274 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml 
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fb9db2d2c1f0d6b39d8ee042db5d5c59acba6ad1daf47c18792c1f5fb24b3401.yml diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index ef2797911..86ef5e576 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -31,7 +31,7 @@ describe('resource transcriptions', () => { prompt: 'prompt', response_format: 'json', temperature: 0, - timestamp_granularities: ['word', 'segment'], + timestamp_granularities: ['word'], }); }); }); diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index fdc325254..a64465c77 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -30,15 +30,13 @@ describe('resource assistants', () => { response_format: 'auto', temperature: 1, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [ - { chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} }, - ], + vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], }, }, - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, }); }); diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index bfbcab1cb..c1f5f7b6e 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -27,20 +27,7 @@ describe('resource messages', () => { const response = await client.beta.threads.messages.create('thread_id', { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - 
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - { - file_id: 'file_id', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - { - file_id: 'file_id', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - ], + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], metadata: {}, }); }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 352d775c0..4fd8261ac 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -29,94 +29,7 @@ describe('resource runs', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 
'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], metadata: {}, }, ], @@ -130,7 +43,7 @@ describe('resource runs', () => { stream: false, temperature: 1, tool_choice: 'none', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, truncation_strategy: { type: 'auto', last_messages: 1 }, }); @@ -214,7 +127,7 @@ describe('resource runs', () => { test('submitToolOutputs: only required params', async () => { const responsePromise = client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { - tool_outputs: [{}, {}, {}], + tool_outputs: [{}], }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -227,11 +140,7 @@ describe('resource runs', () => { test('submitToolOutputs: required and optional params', async () => { const response = await client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { - tool_outputs: [ - { output: 'output', tool_call_id: 'tool_call_id' }, - { output: 'output', tool_call_id: 'tool_call_id' }, - { output: 'output', tool_call_id: 'tool_call_id' }, - ], + tool_outputs: [{ output: 'output', tool_call_id: 'tool_call_id' }], stream: false, }); }); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index dc0a94a7d..aba266316 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -36,109 +36,16 @@ describe('resource threads', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - 
tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], metadata: {}, }, ], metadata: {}, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [ - { - chunking_strategy: { type: 'auto' }, - file_ids: ['string', 'string', 'string'], - metadata: {}, - }, - ], + vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], }, }, }, @@ -222,114 +129,25 @@ describe('resource 
threads', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], metadata: {}, }, ], metadata: {}, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [ - { chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} }, - ], + vector_stores: [{ chunking_strategy: { type: 'auto' }, 
file_ids: ['string'], metadata: {} }], }, }, }, tool_choice: 'none', tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'] }, }, - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, truncation_strategy: { type: 'auto', last_messages: 1 }, }); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 180a1d77f..5dcbf9ad6 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -36,7 +36,7 @@ describe('resource completions', () => { max_completion_tokens: 0, max_tokens: 0, metadata: { foo: 'string' }, - modalities: ['text', 'audio'], + modalities: ['text'], n: 1, parallel_tool_calls: true, prediction: { content: 'string', type: 'content' }, @@ -55,14 +55,6 @@ describe('resource completions', () => { function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, type: 'function', }, - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, ], top_logprobs: 0, top_p: 1, diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index 646c2f5cf..0ab09768a 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -31,30 +31,7 @@ describe('resource jobs', () => { integrations: [ { type: 'wandb', - wandb: { - project: 'my-wandb-project', - entity: 'entity', - name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], - }, - }, - { - type: 'wandb', - wandb: { - project: 'my-wandb-project', - 
entity: 'entity', - name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], - }, - }, - { - type: 'wandb', - wandb: { - project: 'my-wandb-project', - entity: 'entity', - name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], - }, + wandb: { project: 'my-wandb-project', entity: 'entity', name: 'name', tags: ['custom-tag'] }, }, ], seed: 42, diff --git a/tests/api-resources/uploads/uploads.test.ts b/tests/api-resources/uploads/uploads.test.ts index e4e3c6d30..c9ea4ddd7 100644 --- a/tests/api-resources/uploads/uploads.test.ts +++ b/tests/api-resources/uploads/uploads.test.ts @@ -53,9 +53,7 @@ describe('resource uploads', () => { }); test('complete: only required params', async () => { - const responsePromise = client.uploads.complete('upload_abc123', { - part_ids: ['string', 'string', 'string'], - }); + const responsePromise = client.uploads.complete('upload_abc123', { part_ids: ['string'] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -66,9 +64,6 @@ describe('resource uploads', () => { }); test('complete: required and optional params', async () => { - const response = await client.uploads.complete('upload_abc123', { - part_ids: ['string', 'string', 'string'], - md5: 'md5', - }); + const response = await client.uploads.complete('upload_abc123', { part_ids: ['string'], md5: 'md5' }); }); }); From 6961c37f2e581bcc12ec2bbe77df2b9b260fe297 Mon Sep 17 00:00:00 2001 From: Young-Jin Park Date: Mon, 18 Nov 2024 16:11:29 -0500 Subject: [PATCH 325/533] feat: bump model in all example snippets to gpt-4o --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 2f05654b4..c363eaa98 100644 --- a/README.md +++ b/README.md @@ -173,7 +173,7 @@ const openai = new OpenAI(); async function main() { const stream = await openai.beta.chat.completions.stream({ - model: 'gpt-4', + model: 'gpt-4o', messages: [{ 
role: 'user', content: 'Say this is a test' }], stream: true, }); @@ -226,7 +226,7 @@ const client = new OpenAI(); async function main() { const runner = client.beta.chat.completions .runTools({ - model: 'gpt-3.5-turbo', + model: 'gpt-4o', messages: [{ role: 'user', content: 'How is the weather this week?' }], tools: [ { @@ -368,7 +368,7 @@ Error codes are as followed: All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. ```ts -const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4' }); +const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); console.log(completion._request_id) // req_123 ``` @@ -392,7 +392,7 @@ const azureADTokenProvider = getBearerTokenProvider(credential, scope); const openai = new AzureOpenAI({ azureADTokenProvider }); const result = await openai.chat.completions.create({ - model: 'gpt-4-1106-preview', + model: 'gpt-4o', messages: [{ role: 'user', content: 'Say hello!' 
}], }); From ebdb4f72cc01afbee649aca009fdaf413e61c507 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 17:57:52 +0000 Subject: [PATCH 326/533] docs: improve jsr documentation (#1197) --- README.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index c363eaa98..5d6ba1a8b 100644 --- a/README.md +++ b/README.md @@ -14,16 +14,21 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo npm install openai ``` -You can also import from jsr: +### Installation from JSR - +```sh +deno add jsr:@openai/openai +npx jsr add @openai/openai +``` + +These commands will make the module importable from the `@openai/openai` scope: + +You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: ```ts import OpenAI from 'jsr:@openai/openai'; ``` - - ## Usage The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API. @@ -622,7 +627,7 @@ TypeScript >= 4.5 is supported. The following runtimes are supported: - Node.js 18 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions. -- Deno v1.28.0 or higher, using `import OpenAI from "npm:openai"`. +- Deno v1.28.0 or higher. - Bun 1.0 or later. - Cloudflare Workers. - Vercel Edge Runtime. 
From e34981c00f2f0360baffe870bcc38786030671bf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 23:41:02 +0000 Subject: [PATCH 327/533] docs: change readme title (#1198) --- README.md | 2 +- scripts/build | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5d6ba1a8b..d89e121f1 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# OpenAI Node API Library +# OpenAI TypeScript and JavaScript API Library [![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) diff --git a/scripts/build b/scripts/build index 0246c90e3..4e86f99e2 100755 --- a/scripts/build +++ b/scripts/build @@ -32,7 +32,7 @@ npm exec tsc-multi # copy over handwritten .js/.mjs/.d.ts files cp src/_shims/*.{d.ts,js,mjs,md} dist/_shims cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto -# we need to add exports = module.exports = OpenAI Node to index.js; +# we need to add exports = module.exports = OpenAI to index.js; # No way to get that from index.ts because it would cause compile errors # when building .mjs node scripts/utils/fix-index-exports.cjs From 3968ef1c4fa860ff246e0e803808752b261c18ce Mon Sep 17 00:00:00 2001 From: Eric He Date: Wed, 20 Nov 2024 02:35:46 -0800 Subject: [PATCH 328/533] docs(readme): fix incorrect fileBatches.uploadAndPoll params (#1200) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d89e121f1..ec17427a6 100644 --- a/README.md +++ b/README.md @@ -133,7 +133,7 @@ const fileList = [ ... 
]; -const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, fileList); +const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList}); ``` ### Streaming Helpers From 0feeafd21ba4b6281cc3b9dafa2919b1e2e4d1c3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:17:02 +0000 Subject: [PATCH 329/533] feat(api): add gpt-4o-2024-11-20 model (#1201) --- .stats.yml | 2 +- src/resources/batches.ts | 2 +- src/resources/chat/chat.ts | 1 + src/resources/chat/completions.ts | 5 +++-- src/resources/files.ts | 2 +- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index fdef8d274..4827e5388 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fb9db2d2c1f0d6b39d8ee042db5d5c59acba6ad1daf47c18792c1f5fb24b3401.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-aa9b01fc0c17eb0cbc200533fc20d6a49c5e764ceaf8049e08b294532be6e9ff.yml diff --git a/src/resources/batches.ts b/src/resources/batches.ts index e68e7569c..ec5ca6331 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -232,7 +232,7 @@ export interface BatchCreateParams { * Your input file must be formatted as a * [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), * and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - * requests, and can be up to 100 MB in size. + * requests, and can be up to 200 MB in size. 
*/ input_file_id: string; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 351430f8c..09cd3d123 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -49,6 +49,7 @@ export type ChatModel = | 'o1-mini' | 'o1-mini-2024-09-12' | 'gpt-4o' + | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' | 'gpt-4o-realtime-preview' diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 9d344744a..8e9a4385e 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -250,8 +250,9 @@ export interface ChatCompletionAudioParam { format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** - * The voice the model uses to respond. Supported voices are `alloy`, `ash`, - * `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. + * The voice the model uses to respond. Supported voices are `ash`, `ballad`, + * `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, + * `echo`, and `shimmer`; these voices are less expressive). */ voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } diff --git a/src/resources/files.ts b/src/resources/files.ts index 48d8f8747..42a7bdfba 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -25,7 +25,7 @@ export class Files extends APIResource { * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) * models. * - * The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + * The Batch API only supports `.jsonl` files up to 200 MB in size. The input also * has a specific required * [format](https://platform.openai.com/docs/api-reference/batch/request-input). 
* From 1e9391bc17c29287f2b7bb8acf77390f3e727ad2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:17:37 +0000 Subject: [PATCH 330/533] release: 4.73.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 29 +++++++++++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 33 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e53c9dd88..d3e848620 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.72.0" + ".": "4.73.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 951ef0784..51741f552 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,34 @@ # Changelog +## 4.73.0 (2024-11-20) + +Full Changelog: [v4.72.0...v4.73.0](https://github.com/openai/openai-node/compare/v4.72.0...v4.73.0) + +### Features + +* **api:** add gpt-4o-2024-11-20 model ([#1201](https://github.com/openai/openai-node/issues/1201)) ([0feeafd](https://github.com/openai/openai-node/commit/0feeafd21ba4b6281cc3b9dafa2919b1e2e4d1c3)) +* bump model in all example snippets to gpt-4o ([6961c37](https://github.com/openai/openai-node/commit/6961c37f2e581bcc12ec2bbe77df2b9b260fe297)) + + +### Bug Fixes + +* **docs:** add missing await to pagination example ([#1190](https://github.com/openai/openai-node/issues/1190)) ([524b9e8](https://github.com/openai/openai-node/commit/524b9e82ae13a3b5093dcfbfd1169a798cf99ab4)) + + +### Chores + +* **client:** drop unused devDependency ([#1191](https://github.com/openai/openai-node/issues/1191)) ([8ee6c03](https://github.com/openai/openai-node/commit/8ee6c0335673f2ecf84ea11bdfc990adab607e20)) +* **internal:** spec update ([#1195](https://github.com/openai/openai-node/issues/1195)) ([12f9334](https://github.com/openai/openai-node/commit/12f93346857196b93f94865cc3744d769e5e519c)) +* **internal:** use reexports not destructuring 
([#1181](https://github.com/openai/openai-node/issues/1181)) ([f555dd6](https://github.com/openai/openai-node/commit/f555dd6503bc4ccd4d13f4e1a1d36fbbfd51c369)) + + +### Documentation + +* bump models in example snippets to gpt-4o ([#1184](https://github.com/openai/openai-node/issues/1184)) ([4ec4027](https://github.com/openai/openai-node/commit/4ec402790cf3cfbccbf3ef9b61d577b0118977e8)) +* change readme title ([#1198](https://github.com/openai/openai-node/issues/1198)) ([e34981c](https://github.com/openai/openai-node/commit/e34981c00f2f0360baffe870bcc38786030671bf)) +* improve jsr documentation ([#1197](https://github.com/openai/openai-node/issues/1197)) ([ebdb4f7](https://github.com/openai/openai-node/commit/ebdb4f72cc01afbee649aca009fdaf413e61c507)) +* **readme:** fix incorrect fileBatches.uploadAndPoll params ([#1200](https://github.com/openai/openai-node/issues/1200)) ([3968ef1](https://github.com/openai/openai-node/commit/3968ef1c4fa860ff246e0e803808752b261c18ce)) + ## 4.72.0 (2024-11-12) Full Changelog: [v4.71.1...v4.72.0](https://github.com/openai/openai-node/compare/v4.71.1...v4.72.0) diff --git a/jsr.json b/jsr.json index ad1751852..f09f5bbab 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.72.0", + "version": "4.73.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 8a61d468f..13e8ee3bc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.72.0", + "version": "4.73.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index cad6e2320..4e3a33b17 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.72.0'; // x-release-please-version +export const VERSION = '4.73.0'; // x-release-please-version From aa5443624b4dc206ede08a743ec276b3a576861f Mon Sep 17 00:00:00 2001 From: 
Robert Craigie Date: Fri, 22 Nov 2024 19:56:39 +0000 Subject: [PATCH 331/533] docs(readme): mention `.withResponse()` for streaming request ID (#1202) --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index ec17427a6..ac9c84a42 100644 --- a/README.md +++ b/README.md @@ -377,6 +377,18 @@ const completion = await client.chat.completions.create({ messages: [{ role: 'us console.log(completion._request_id) // req_123 ``` +You can also access the Request ID using the `.withResponse()` method: + +```ts +const { data: stream, request_id } = await openai.chat.completions + .create({ + model: 'gpt-4', + messages: [{ role: 'user', content: 'Say this is a test' }], + stream: true, + }) + .withResponse(); +``` + ## Microsoft Azure OpenAI To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` From 3f8634ed111782e3090a25d1d8640e050fb2c45b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 05:07:22 +0000 Subject: [PATCH 332/533] release: 4.73.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d3e848620..92fcace17 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.73.0" + ".": "4.73.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 51741f552..c32a0ce32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.73.1 (2024-11-25) + +Full Changelog: [v4.73.0...v4.73.1](https://github.com/openai/openai-node/compare/v4.73.0...v4.73.1) + +### Documentation + +* **readme:** mention `.withResponse()` for streaming request ID ([#1202](https://github.com/openai/openai-node/issues/1202)) 
([b6800d4](https://github.com/openai/openai-node/commit/b6800d4dea2729fe3b0864171ce8fb3b2cc1b21c)) + ## 4.73.0 (2024-11-20) Full Changelog: [v4.72.0...v4.73.0](https://github.com/openai/openai-node/compare/v4.72.0...v4.73.0) diff --git a/jsr.json b/jsr.json index f09f5bbab..0bd5eab3f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.73.0", + "version": "4.73.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 13e8ee3bc..685d59f56 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.73.0", + "version": "4.73.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 4e3a33b17..28fbb6572 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.73.0'; // x-release-please-version +export const VERSION = '4.73.1'; // x-release-please-version From 2628a0bc6a380478889d94cf6f08cb179eab9e9c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 27 Nov 2024 16:15:42 +0000 Subject: [PATCH 333/533] feat(internal): make git install file structure match npm (#1204) --- package.json | 2 +- scripts/utils/check-is-in-git-install.sh | 2 +- scripts/utils/git-swap.sh | 13 +++++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100755 scripts/utils/git-swap.sh diff --git a/package.json b/package.json index 685d59f56..87004d273 100644 --- a/package.json +++ b/package.json @@ -18,7 +18,7 @@ "build": "./scripts/build", "prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1", "format": "prettier --write --cache --cache-strategy metadata . 
!dist", - "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build; fi", + "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build && ./scripts/utils/git-swap.sh; fi", "tsn": "ts-node -r tsconfig-paths/register", "lint": "./scripts/lint", "fix": "./scripts/format" diff --git a/scripts/utils/check-is-in-git-install.sh b/scripts/utils/check-is-in-git-install.sh index 36bcedc20..1354eb432 100755 --- a/scripts/utils/check-is-in-git-install.sh +++ b/scripts/utils/check-is-in-git-install.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Check if you happen to call prepare for a repository that's already in node_modules. [ "$(basename "$(dirname "$PWD")")" = 'node_modules' ] || # The name of the containing directory that 'npm` uses, which looks like diff --git a/scripts/utils/git-swap.sh b/scripts/utils/git-swap.sh new file mode 100755 index 000000000..79d1888eb --- /dev/null +++ b/scripts/utils/git-swap.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -exuo pipefail +# the package is published to NPM from ./dist +# we want the final file structure for git installs to match the npm installs, so we + +# delete everything except ./dist and ./node_modules +find . -maxdepth 1 -mindepth 1 ! -name 'dist' ! -name 'node_modules' -exec rm -rf '{}' + + +# move everything from ./dist to . +mv dist/* . 
+ +# delete the now-empty ./dist +rmdir dist From d40c61cfc8c4f5f6aea4ffdd3ea3909e02b92bd5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 05:07:36 +0000 Subject: [PATCH 334/533] release: 4.74.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 92fcace17..8edd9c22e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.73.1" + ".": "4.74.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c32a0ce32..595091ff3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.74.0 (2024-12-02) + +Full Changelog: [v4.73.1...v4.74.0](https://github.com/openai/openai-node/compare/v4.73.1...v4.74.0) + +### Features + +* **internal:** make git install file structure match npm ([#1204](https://github.com/openai/openai-node/issues/1204)) ([e7c4c6d](https://github.com/openai/openai-node/commit/e7c4c6d23adbe52300053a8d35db6e341c438703)) + ## 4.73.1 (2024-11-25) Full Changelog: [v4.73.0...v4.73.1](https://github.com/openai/openai-node/compare/v4.73.0...v4.73.1) diff --git a/jsr.json b/jsr.json index 0bd5eab3f..eb073e7e6 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.73.1", + "version": "4.74.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 87004d273..7e188774a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.73.1", + "version": "4.74.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 28fbb6572..b8dd781be 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 
@@ -export const VERSION = '4.73.1'; // x-release-please-version +export const VERSION = '4.74.0'; // x-release-please-version From d0e210dd43b8cfbc804111b9923a26dd30bcc87f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 02:02:23 +0000 Subject: [PATCH 335/533] feat: improve docs for jsr README.md (#1208) --- scripts/build-deno | 2 + scripts/utils/convert-jsr-readme.cjs | 140 +++++++++++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 scripts/utils/convert-jsr-readme.cjs diff --git a/scripts/build-deno b/scripts/build-deno index dfce83548..bce31078e 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -25,3 +25,5 @@ done for file in README.md LICENSE CHANGELOG.md; do if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done + +node scripts/utils/convert-jsr-readme.cjs ./dist-deno/README.md diff --git a/scripts/utils/convert-jsr-readme.cjs b/scripts/utils/convert-jsr-readme.cjs new file mode 100644 index 000000000..f9d089c73 --- /dev/null +++ b/scripts/utils/convert-jsr-readme.cjs @@ -0,0 +1,140 @@ +const fs = require('fs'); +const { parse } = require('@typescript-eslint/parser'); +const { TSError } = require('@typescript-eslint/typescript-estree'); + +/** + * Quick and dirty AST traversal + */ +function traverse(node, visitor) { + if (!node || typeof node.type !== 'string') return; + visitor.node?.(node); + visitor[node.type]?.(node); + for (const key in node) { + const value = node[key]; + if (Array.isArray(value)) { + for (const elem of value) traverse(elem, visitor); + } else if (value instanceof Object) { + traverse(value, visitor); + } + } +} + +/** + * Helper method for replacing arbitrary ranges of text in input code. 
+ */ +function replaceRanges(code, replacer) { + const replacements = []; + replacer({ replace: (range, replacement) => replacements.push({ range, replacement }) }); + + if (!replacements.length) return code; + replacements.sort((a, b) => a.range[0] - b.range[0]); + const overlapIndex = replacements.findIndex( + (r, index) => index > 0 && replacements[index - 1].range[1] > r.range[0], + ); + if (overlapIndex >= 0) { + throw new Error( + `replacements overlap: ${JSON.stringify(replacements[overlapIndex - 1])} and ${JSON.stringify( + replacements[overlapIndex], + )}`, + ); + } + + const parts = []; + let end = 0; + for (const { + range: [from, to], + replacement, + } of replacements) { + if (from > end) parts.push(code.substring(end, from)); + parts.push(replacement); + end = to; + } + if (end < code.length) parts.push(code.substring(end)); + return parts.join(''); +} + +function replaceProcessEnv(content) { + // Replace process.env['KEY'] and process.env.KEY with Deno.env.get('KEY') + return content.replace(/process\.env(?:\.|\[['"])(.+?)(?:['"]\])/g, "Deno.env.get('$1')"); +} + +function replaceProcessStdout(content) { + return content.replace(/process\.stdout.write\(([^)]+)\)/g, 'Deno.stdout.writeSync($1)'); +} + +function replaceInstallationDirections(content) { + // Remove npm installation section + return content.replace(/```sh\nnpm install.*?\n```.*### Installation from JSR\n\n/s, ''); +} + +/** + * Maps over module paths in imports and exports + */ +function replaceImports(code, config) { + try { + const ast = parse(code, { sourceType: 'module', range: true }); + return replaceRanges(code, ({ replace }) => + traverse(ast, { + node(node) { + switch (node.type) { + case 'ImportDeclaration': + case 'ExportNamedDeclaration': + case 'ExportAllDeclaration': + case 'ImportExpression': + if (node.source) { + const { range, value } = node.source; + if (value.startsWith(config.npm)) { + replace(range, JSON.stringify(value.replace(config.npm, config.jsr))); + } + } + } 
+ }, + }), + ); + } catch (e) { + if (e instanceof TSError) { + // This can error if the code block is not valid TS, in this case give up trying to transform the imports. + console.warn(`Original codeblock could not be parsed, replace import skipped: ${e}\n\n${code}`); + return code; + } + throw e; + } +} + +function processReadme(config, file) { + try { + let readmeContent = fs.readFileSync(file, 'utf8'); + + // First replace installation directions + readmeContent = replaceInstallationDirections(readmeContent); + + // Replace content in all code blocks with a single regex + readmeContent = readmeContent.replaceAll( + /```(?:typescript|ts|javascript|js)\n([\s\S]*?)```/g, + (match, codeBlock) => { + try { + let transformedCode = codeBlock.trim(); + transformedCode = replaceImports(transformedCode, config); + transformedCode = replaceProcessEnv(transformedCode); + transformedCode = replaceProcessStdout(transformedCode); + return '```typescript\n' + transformedCode + '\n```'; + } catch (error) { + console.warn(`Failed to transform code block: ${error}\n\n${codeBlock}`); + return match; // Return original code block if transformation fails + } + }, + ); + + fs.writeFileSync(file, readmeContent); + } catch (error) { + console.error('Error processing README:', error); + throw error; + } +} + +const config = { + npm: 'openai', + jsr: '@openai/openai', +}; + +processReadme(config, process.argv[2]); From ddb27b660950735f13934759c3db049bcf4dafd5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 05:07:20 +0000 Subject: [PATCH 336/533] release: 4.75.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8edd9c22e..6258f1481 100644 --- a/.release-please-manifest.json +++ 
b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.74.0" + ".": "4.75.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 595091ff3..2d91a77c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.75.0 (2024-12-03) + +Full Changelog: [v4.74.0...v4.75.0](https://github.com/openai/openai-node/compare/v4.74.0...v4.75.0) + +### Features + +* improve docs for jsr README.md ([#1208](https://github.com/openai/openai-node/issues/1208)) ([338527e](https://github.com/openai/openai-node/commit/338527e40361e2de899a63f280d4ec2db5e87f3c)) + ## 4.74.0 (2024-12-02) Full Changelog: [v4.73.1...v4.74.0](https://github.com/openai/openai-node/compare/v4.73.1...v4.74.0) diff --git a/jsr.json b/jsr.json index eb073e7e6..a394539d1 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.74.0", + "version": "4.75.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 7e188774a..5738871a3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.74.0", + "version": "4.75.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index b8dd781be..82fc52958 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.74.0'; // x-release-please-version +export const VERSION = '4.75.0'; // x-release-please-version From 0f74bf4576ed26884f9ef9148bd854e60250c1a9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 20:20:25 +0000 Subject: [PATCH 337/533] chore: bump openapi url (#1210) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4827e5388..19920c8be 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-aa9b01fc0c17eb0cbc200533fc20d6a49c5e764ceaf8049e08b294532be6e9ff.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-d702cba829ceda336f44d0eb89ce61dba353849a40f0193e7007439345daf1bb.yml From f19c56e6087423cb2ef20aaa6b597467f4d81e48 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 20:53:30 +0000 Subject: [PATCH 338/533] feat(api): updates (#1212) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 19920c8be..3cc042fe0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-d702cba829ceda336f44d0eb89ce61dba353849a40f0193e7007439345daf1bb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2e0e0678be19d1118fd796af291822075e40538dba326611e177e9f3dc245a53.yml From fbd968576357e635e541a3475a67fb741f603292 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 5 Dec 2024 05:07:39 +0000 Subject: [PATCH 339/533] release: 4.76.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6258f1481..1cc8c9627 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.75.0" + ".": "4.76.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d91a77c9..e68b45e8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.76.0 (2024-12-05) + +Full Changelog: [v4.75.0...v4.76.0](https://github.com/openai/openai-node/compare/v4.75.0...v4.76.0) + +### Features + +* **api:** 
updates ([#1212](https://github.com/openai/openai-node/issues/1212)) ([e0fedf2](https://github.com/openai/openai-node/commit/e0fedf2c5a91d0c03d8dad6854b366f77eab4923)) + + +### Chores + +* bump openapi url ([#1210](https://github.com/openai/openai-node/issues/1210)) ([3fa95a4](https://github.com/openai/openai-node/commit/3fa95a429d4b2adecce35a7b96b73f6d5e88eeeb)) + ## 4.75.0 (2024-12-03) Full Changelog: [v4.74.0...v4.75.0](https://github.com/openai/openai-node/compare/v4.74.0...v4.75.0) diff --git a/jsr.json b/jsr.json index a394539d1..2c6820969 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.75.0", + "version": "4.76.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 5738871a3..fae301ee7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.75.0", + "version": "4.76.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 82fc52958..b4cc35ca9 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.75.0'; // x-release-please-version +export const VERSION = '4.76.0'; // x-release-please-version From c35555790a7cba54517f43e080d2b2dc6d8ea404 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 18:32:44 +0000 Subject: [PATCH 340/533] chore(internal): remove unnecessary getRequestClient function (#1215) --- src/core.ts | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/core.ts b/src/core.ts index 0c8e69ffc..803496412 100644 --- a/src/core.ts +++ b/src/core.ts @@ -558,19 +558,13 @@ export abstract class APIClient { const timeout = setTimeout(() => controller.abort(), ms); return ( - this.getRequestClient() - // use undefined this binding; fetch errors if bound to something 
else in browser/cloudflare - .fetch.call(undefined, url, { signal: controller.signal as any, ...options }) - .finally(() => { - clearTimeout(timeout); - }) + // use undefined this binding; fetch errors if bound to something else in browser/cloudflare + this.fetch.call(undefined, url, { signal: controller.signal as any, ...options }).finally(() => { + clearTimeout(timeout); + }) ); } - protected getRequestClient(): RequestClient { - return { fetch: this.fetch }; - } - private shouldRetry(response: Response): boolean { // Note this is not a standard header. const shouldRetryHeader = response.headers.get('x-should-retry'); From fb4820e04a9d579e9a8913dd98cc29cf32a9a7cc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 13:57:28 +0000 Subject: [PATCH 341/533] chore(internal): bump cross-spawn to v7.0.6 (#1217) Note: it is a dev transitive dependency. --- yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yarn.lock b/yarn.lock index e139e1fbe..f86935095 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1370,9 +1370,9 @@ create-require@^1.1.0: integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" From 6e8c1d06dcf098ec3dabe1128d29b22eee4f4b58 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 13:58:00 +0000 Subject: [PATCH 342/533] release: 4.76.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1cc8c9627..10a72c4fa 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.76.0" + ".": "4.76.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e68b45e8a..7ea1f7f7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.76.1 (2024-12-10) + +Full Changelog: [v4.76.0...v4.76.1](https://github.com/openai/openai-node/compare/v4.76.0...v4.76.1) + +### Chores + +* **internal:** bump cross-spawn to v7.0.6 ([#1217](https://github.com/openai/openai-node/issues/1217)) ([c07ad29](https://github.com/openai/openai-node/commit/c07ad298d58e5aeaf816ee3de65fd59bf3fc8b66)) +* **internal:** remove unnecessary getRequestClient function ([#1215](https://github.com/openai/openai-node/issues/1215)) ([bef3925](https://github.com/openai/openai-node/commit/bef392526cd339f45c574bc476649c77be36c612)) + ## 4.76.0 (2024-12-05) Full Changelog: [v4.75.0...v4.76.0](https://github.com/openai/openai-node/compare/v4.75.0...v4.76.0) diff --git a/jsr.json b/jsr.json index 2c6820969..3fa6b07da 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.76.0", + "version": "4.76.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index fae301ee7..ddffb2c6a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.76.0", + "version": "4.76.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 
b4cc35ca9..4f8b7224e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.76.0'; // x-release-please-version +export const VERSION = '4.76.1'; // x-release-please-version From 94ef9d75f20699e80c81fb0defd31dc62d8d3585 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 10 Dec 2024 22:25:12 +0000 Subject: [PATCH 343/533] chore(types): nicer error class types + jsdocs (#1219) --- src/error.ts | 64 ++++++++++++++++++++-------------------------------- 1 file changed, 24 insertions(+), 40 deletions(-) diff --git a/src/error.ts b/src/error.ts index 72b4f7bfd..f3dc57610 100644 --- a/src/error.ts +++ b/src/error.ts @@ -4,10 +4,17 @@ import { castToError, Headers } from './core'; export class OpenAIError extends Error {} -export class APIError extends OpenAIError { - readonly status: number | undefined; - readonly headers: Headers | undefined; - readonly error: Object | undefined; +export class APIError< + TStatus extends number | undefined = number | undefined, + THeaders extends Headers | undefined = Headers | undefined, + TError extends Object | undefined = Object | undefined, +> extends OpenAIError { + /** HTTP status for the response that caused the error */ + readonly status: TStatus; + /** HTTP headers for the response that caused the error */ + readonly headers: THeaders; + /** JSON body of the response that caused the error */ + readonly error: TError; readonly code: string | null | undefined; readonly param: string | null | undefined; @@ -15,19 +22,14 @@ export class APIError extends OpenAIError { readonly request_id: string | null | undefined; - constructor( - status: number | undefined, - error: Object | undefined, - message: string | undefined, - headers: Headers | undefined, - ) { + constructor(status: TStatus, error: TError, message: string | undefined, headers: THeaders) { super(`${APIError.makeMessage(status, error, message)}`); this.status = status; this.headers = headers; this.request_id = headers?.['x-request-id']; 
+ this.error = error; const data = error as Record; - this.error = data; this.code = data?.['code']; this.param = data?.['param']; this.type = data?.['type']; @@ -60,7 +62,7 @@ export class APIError extends OpenAIError { message: string | undefined, headers: Headers | undefined, ): APIError { - if (!status) { + if (!status || !headers) { return new APIConnectionError({ message, cause: castToError(errorResponse) }); } @@ -102,17 +104,13 @@ export class APIError extends OpenAIError { } } -export class APIUserAbortError extends APIError { - override readonly status: undefined = undefined; - +export class APIUserAbortError extends APIError { constructor({ message }: { message?: string } = {}) { super(undefined, undefined, message || 'Request was aborted.', undefined); } } -export class APIConnectionError extends APIError { - override readonly status: undefined = undefined; - +export class APIConnectionError extends APIError { constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) { super(undefined, undefined, message || 'Connection error.', undefined); // in some environments the 'cause' property is already declared @@ -127,35 +125,21 @@ export class APIConnectionTimeoutError extends APIConnectionError { } } -export class BadRequestError extends APIError { - override readonly status: 400 = 400; -} +export class BadRequestError extends APIError<400, Headers> {} -export class AuthenticationError extends APIError { - override readonly status: 401 = 401; -} +export class AuthenticationError extends APIError<401, Headers> {} -export class PermissionDeniedError extends APIError { - override readonly status: 403 = 403; -} +export class PermissionDeniedError extends APIError<403, Headers> {} -export class NotFoundError extends APIError { - override readonly status: 404 = 404; -} +export class NotFoundError extends APIError<404, Headers> {} -export class ConflictError extends APIError { - override readonly status: 409 = 409; -} +export 
class ConflictError extends APIError<409, Headers> {} -export class UnprocessableEntityError extends APIError { - override readonly status: 422 = 422; -} +export class UnprocessableEntityError extends APIError<422, Headers> {} -export class RateLimitError extends APIError { - override readonly status: 429 = 429; -} +export class RateLimitError extends APIError<429, Headers> {} -export class InternalServerError extends APIError {} +export class InternalServerError extends APIError {} export class LengthFinishReasonError extends OpenAIError { constructor() { From f13fed4137bbbe2e6e0a83c1820ccdeecb6ddf01 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:17:57 +0000 Subject: [PATCH 344/533] chore(internal): update isAbsoluteURL (#1223) --- src/core.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core.ts b/src/core.ts index 803496412..e1a93f272 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1013,8 +1013,8 @@ export const safeJSON = (text: string) => { } }; -// https://stackoverflow.com/a/19709846 -const startsWithSchemeRegexp = new RegExp('^(?:[a-z]+:)?//', 'i'); +// https://url.spec.whatwg.org/#url-scheme-string +const startsWithSchemeRegexp = /^[a-z][a-z0-9+.-]*:/i; const isAbsoluteURL = (url: string): boolean => { return startsWithSchemeRegexp.test(url); }; From 6608f957b62a734c93c006bade5e3b0b8b577c4c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 05:07:38 +0000 Subject: [PATCH 345/533] release: 4.76.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 10a72c4fa..47a7d26b6 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - 
".": "4.76.1" + ".": "4.76.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ea1f7f7f..27946ddea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 4.76.2 (2024-12-12) + +Full Changelog: [v4.76.1...v4.76.2](https://github.com/openai/openai-node/compare/v4.76.1...v4.76.2) + +### Chores + +* **internal:** update isAbsoluteURL ([#1223](https://github.com/openai/openai-node/issues/1223)) ([e908ed7](https://github.com/openai/openai-node/commit/e908ed759996fb7706baf46d094fc77419423971)) +* **types:** nicer error class types + jsdocs ([#1219](https://github.com/openai/openai-node/issues/1219)) ([576d24c](https://github.com/openai/openai-node/commit/576d24cc4b3d766dfe28a6031bdc24ac1b711655)) + ## 4.76.1 (2024-12-10) Full Changelog: [v4.76.0...v4.76.1](https://github.com/openai/openai-node/compare/v4.76.0...v4.76.1) diff --git a/jsr.json b/jsr.json index 3fa6b07da..101edee15 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.76.1", + "version": "4.76.2", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index ddffb2c6a..53b82f070 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.76.1", + "version": "4.76.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 4f8b7224e..7117b1feb 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.76.1'; // x-release-please-version +export const VERSION = '4.76.2'; // x-release-please-version From 28649f8de711c6379edb6b9e656a9ac3bafdf763 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 13 Dec 2024 09:43:39 +0000 Subject: [PATCH 346/533] chore(internal): better ecosystem test debugging --- ecosystem-tests/cli.ts | 4 ++++ package.json | 3 ++- yarn.lock | 25 +++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 1 
deletion(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index 4803b47c2..00120e5f9 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -4,6 +4,10 @@ import yargs from 'yargs'; import assert from 'assert'; import path from 'path'; +// @ts-ignore +var SegfaultHandler = require('segfault-handler'); +SegfaultHandler.registerHandler('crash.log'); + const TAR_NAME = 'openai.tgz'; const PACK_FOLDER = '.pack'; const PACK_FILE = `${PACK_FOLDER}/${TAR_NAME}`; diff --git a/package.json b/package.json index 53b82f070..35873e1c1 100644 --- a/package.json +++ b/package.json @@ -41,11 +41,12 @@ "eslint": "^8.49.0", "eslint-plugin-prettier": "^5.0.1", "eslint-plugin-unused-imports": "^3.0.0", - "iconv-lite": "^0.6.3", "fast-check": "^3.22.0", + "iconv-lite": "^0.6.3", "jest": "^29.4.0", "prettier": "^3.0.0", "prettier-2": "npm:prettier@^2", + "segfault-handler": "^1.3.0", "ts-jest": "^29.1.0", "ts-node": "^10.5.0", "tsc-multi": "^1.1.0", diff --git a/yarn.lock b/yarn.lock index f86935095..c0220f984 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1169,6 +1169,13 @@ big-integer@^1.6.44: resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85" integrity sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg== +bindings@^1.2.1: + version "1.5.0" + resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" + integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ== + dependencies: + file-uri-to-path "1.0.0" + bplist-parser@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/bplist-parser/-/bplist-parser-0.2.0.tgz#43a9d183e5bf9d545200ceac3e712f79ebbe8d0e" @@ -1746,6 +1753,11 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" +file-uri-to-path@1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" + integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== + fill-range@^7.1.1: version "7.1.1" resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" @@ -2687,6 +2699,11 @@ ms@^2.0.0, ms@^2.1.3: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== +nan@^2.14.0: + version "2.22.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.22.0.tgz#31bc433fc33213c97bad36404bb68063de604de3" + integrity sha512-nbajikzWTMwsW+eSsNm3QwlOs7het9gGJU5dDZzRTQGk03vyBOauxgI4VakDzE0PtsGTmXPsXTbbjVhRwR5mpw== + natural-compare@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" @@ -3037,6 +3054,14 @@ safe-buffer@~5.2.0: resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== +segfault-handler@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/segfault-handler/-/segfault-handler-1.3.0.tgz#054bc847832fa14f218ba6a79e42877501c8870e" + integrity sha512-p7kVHo+4uoYkr0jmIiTBthwV5L2qmWtben/KDunDZ834mbos+tY+iO0//HpAJpOFSQZZ+wxKWuRo4DxV02B7Lg== + dependencies: + bindings "^1.2.1" + nan "^2.14.0" + semver@^6.3.0, semver@^6.3.1: version "6.3.1" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" From 3a6bfe41e3b7277032844ff12186d6f0d0f83554 Mon Sep 17 00:00:00 2001 From: Guspan Tanadi 
<36249910+guspan-tanadi@users.noreply.github.com> Date: Fri, 13 Dec 2024 21:27:45 +0700 Subject: [PATCH 347/533] docs(README): fix helpers section links (#1224) --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index ac9c84a42..b03bcd870 100644 --- a/README.md +++ b/README.md @@ -200,7 +200,7 @@ main(); ``` Streaming with `openai.beta.chat.completions.stream({…})` exposes -[various helpers for your convenience](helpers.md#events) including event handlers and promises. +[various helpers for your convenience](helpers.md#chat-events) including event handlers and promises. Alternatively, you can use `openai.chat.completions.create({ stream: true, … })` which only returns an async iterable of the chunks in the stream and thus uses less memory @@ -285,12 +285,12 @@ main(); // Final content: "It's looking cold and rainy - you might want to wear a jacket!" ``` -Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#events). +Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#chat-events). Note that `runFunctions` was previously available as well, but has been deprecated in favor of `runTools`. Read more about various examples such as with integrating with [zod](helpers.md#integrate-with-zod), -[next.js](helpers.md#integrate-wtih-next-js), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). +[next.js](helpers.md#integrate-with-nextjs), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). 
## File uploads From f361a0c0eb6ae72a902863d6e338f71dc55e416a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 14:28:16 +0000 Subject: [PATCH 348/533] release: 4.76.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 47a7d26b6..52c31fe71 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.76.2" + ".": "4.76.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 27946ddea..4b6f57fe4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.76.3 (2024-12-13) + +Full Changelog: [v4.76.2...v4.76.3](https://github.com/openai/openai-node/compare/v4.76.2...v4.76.3) + +### Chores + +* **internal:** better ecosystem test debugging ([86fc0a8](https://github.com/openai/openai-node/commit/86fc0a81ede2780d3fcebaabff3d9fa9a36cc9c0)) + + +### Documentation + +* **README:** fix helpers section links ([#1224](https://github.com/openai/openai-node/issues/1224)) ([efbe30a](https://github.com/openai/openai-node/commit/efbe30a156cec1836d3db28f663066b33be57ba2)) + ## 4.76.2 (2024-12-12) Full Changelog: [v4.76.1...v4.76.2](https://github.com/openai/openai-node/compare/v4.76.1...v4.76.2) diff --git a/jsr.json b/jsr.json index 101edee15..ef9ce6848 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.76.2", + "version": "4.76.3", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 35873e1c1..47f363ba1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.76.2", + "version": "4.76.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", 
diff --git a/src/version.ts b/src/version.ts index 7117b1feb..01cd56405 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.76.2'; // x-release-please-version +export const VERSION = '4.76.3'; // x-release-please-version From bd1a82dc8f867c271fc6f226c7d98f8de439ab7c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:26:31 +0000 Subject: [PATCH 349/533] chore(internal): fix some typos (#1227) --- src/core.ts | 4 ++-- tests/index.test.ts | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core.ts b/src/core.ts index e1a93f272..68f1e676a 100644 --- a/src/core.ts +++ b/src/core.ts @@ -198,7 +198,7 @@ export abstract class APIClient { maxRetries = 2, timeout = 600000, // 10 minutes httpAgent, - fetch: overridenFetch, + fetch: overriddenFetch, }: { baseURL: string; maxRetries?: number | undefined; @@ -211,7 +211,7 @@ export abstract class APIClient { this.timeout = validatePositiveInteger('timeout', timeout); this.httpAgent = httpAgent; - this.fetch = overridenFetch ?? fetch; + this.fetch = overriddenFetch ?? 
fetch; } protected authHeaders(opts: FinalRequestOptions): Headers { diff --git a/tests/index.test.ts b/tests/index.test.ts index f39571121..bf113e7bb 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -177,7 +177,7 @@ describe('instantiate client', () => { expect(client.apiKey).toBe('My API Key'); }); - test('with overriden environment variable arguments', () => { + test('with overridden environment variable arguments', () => { // set options via env var process.env['OPENAI_API_KEY'] = 'another My API Key'; const client = new OpenAI({ apiKey: 'My API Key' }); From 4984aaccbddcd05349c0c47c608b387b5b1f7ef6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:56:30 +0000 Subject: [PATCH 350/533] feat(api): new o1 and GPT-4o models + preference fine-tuning (#1229) learn more here: https://platform.openai.com/docs/changelog --- .stats.yml | 2 +- api.md | 2 + src/index.ts | 4 + src/resources/chat/chat.ts | 11 +- src/resources/chat/completions.ts | 96 +++++-- src/resources/chat/index.ts | 2 + src/resources/fine-tuning/jobs/jobs.ts | 270 +++++++++++++++++- tests/api-resources/chat/completions.test.ts | 5 +- .../fine-tuning/jobs/jobs.test.ts | 14 + 9 files changed, 372 insertions(+), 34 deletions(-) diff --git a/.stats.yml b/.stats.yml index 3cc042fe0..d4d7d3c40 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2e0e0678be19d1118fd796af291822075e40538dba326611e177e9f3dc245a53.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-779ea2754025daf5e18eb8ceb203ec321692636bc3a999338556a479178efa6c.yml diff --git a/api.md b/api.md index 465730de8..54bcf08d7 100644 --- a/api.md +++ b/api.md @@ -41,6 +41,7 @@ Types: - ChatCompletionContentPartInputAudio - ChatCompletionContentPartRefusal - ChatCompletionContentPartText +- 
ChatCompletionDeveloperMessageParam - ChatCompletionFunctionCallOption - ChatCompletionFunctionMessageParam - ChatCompletionMessage @@ -49,6 +50,7 @@ Types: - ChatCompletionModality - ChatCompletionNamedToolChoice - ChatCompletionPredictionContent +- ChatCompletionReasoningEffort - ChatCompletionRole - ChatCompletionStreamOptions - ChatCompletionSystemMessageParam diff --git a/src/index.ts b/src/index.ts index 58d7410e4..2320850fb 100644 --- a/src/index.ts +++ b/src/index.ts @@ -80,6 +80,7 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, ChatCompletionMessage, @@ -88,6 +89,7 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -353,6 +355,7 @@ export declare namespace OpenAI { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, type ChatCompletionMessage as ChatCompletionMessage, @@ -361,6 +364,7 @@ export declare namespace OpenAI { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type 
ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 09cd3d123..2230b19bd 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -16,6 +16,7 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, ChatCompletionMessage, @@ -24,6 +25,7 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, @@ -44,6 +46,8 @@ export class Chat extends APIResource { } export type ChatModel = + | 'o1' + | 'o1-2024-12-17' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o1-mini' @@ -52,10 +56,11 @@ export type ChatModel = | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-05-13' - | 'gpt-4o-realtime-preview' - | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' + | 'gpt-4o-audio-preview-2024-12-17' + | 'gpt-4o-mini-audio-preview' + | 'gpt-4o-mini-audio-preview-2024-12-17' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' @@ -96,6 +101,7 @@ export declare namespace Chat { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, type ChatCompletionMessage as ChatCompletionMessage, @@ -104,6 +110,7 @@ export declare namespace Chat { type ChatCompletionModality as ChatCompletionModality, type 
ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 8e9a4385e..31f5814cb 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -15,6 +15,12 @@ export class Completions extends APIResource { * [text generation](https://platform.openai.com/docs/guides/text-generation), * [vision](https://platform.openai.com/docs/guides/vision), and * [audio](https://platform.openai.com/docs/guides/audio) guides. + * + * Parameter support can differ depending on the model used to generate the + * response, particularly for newer reasoning models. Parameters that are only + * supported for reasoning models are noted below. For the current state of + * unsupported parameters in reasoning models, + * [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). */ create( body: ChatCompletionCreateParamsNonStreaming, @@ -135,6 +141,9 @@ export namespace ChatCompletion { } } +/** + * Messages sent by the model in response to user messages. + */ export interface ChatCompletionAssistantMessageParam { /** * The role of the messages author, in this case `assistant`. @@ -530,6 +539,29 @@ export interface ChatCompletionContentPartText { type: 'text'; } +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, `developer` messages + * replace the previous `system` messages. + */ +export interface ChatCompletionDeveloperMessageParam { + /** + * The contents of the developer message. 
+ */ + content: string | Array; + + /** + * The role of the messages author, in this case `developer`. + */ + role: 'developer'; + + /** + * An optional name for the participant. Provides the model information to + * differentiate between participants of the same role. + */ + name?: string; +} + /** * Specifying a particular function via `{"name": "my_function"}` forces the model * to call that function. @@ -620,7 +652,13 @@ export namespace ChatCompletionMessage { } } +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, `developer` messages + * replace the previous `system` messages. + */ export type ChatCompletionMessageParam = + | ChatCompletionDeveloperMessageParam | ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam | ChatCompletionAssistantMessageParam @@ -707,6 +745,16 @@ export interface ChatCompletionPredictionContent { type: 'content'; } +/** + * **o1 models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ +export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high'; + /** * The role of the author of a message */ @@ -725,6 +773,11 @@ export interface ChatCompletionStreamOptions { include_usage?: boolean; } +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, use `developer` messages + * for this purpose instead. + */ export interface ChatCompletionSystemMessageParam { /** * The contents of the system message. @@ -835,6 +888,10 @@ export interface ChatCompletionToolMessageParam { tool_call_id: string; } +/** + * Messages sent by an end user, containing prompts or additional context + * information. 
+ */ export interface ChatCompletionUserMessageParam { /** * The contents of the user message. @@ -891,20 +948,22 @@ export interface ChatCompletionCreateParamsBase { * Number between -2.0 and 2.0. Positive values penalize new tokens based on their * existing frequency in the text so far, decreasing the model's likelihood to * repeat the same line verbatim. - * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ frequency_penalty?: number | null; /** * Deprecated in favor of `tool_choice`. * - * Controls which (if any) function is called by the model. `none` means the model - * will not call a function and instead generates a message. `auto` means the model - * can pick between generating a message or calling a function. Specifying a - * particular function via `{"name": "my_function"}` forces the model to call that + * Controls which (if any) function is called by the model. + * + * `none` means the model will not call a function and instead generates a message. + * + * `auto` means the model can pick between generating a message or calling a * function. * + * Specifying a particular function via `{"name": "my_function"}` forces the model + * to call that function. + * * `none` is the default when no functions are present. `auto` is the default if * functions are present. */ @@ -998,17 +1057,21 @@ export interface ChatCompletionCreateParamsBase { * Number between -2.0 and 2.0. Positive values penalize new tokens based on * whether they appear in the text so far, increasing the model's likelihood to * talk about new topics. - * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ presence_penalty?: number | null; /** - * An object specifying the format that the model must output. 
Compatible with - * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), - * [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini), - * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + * **o1 models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: ChatCompletionReasoningEffort; + + /** + * An object specifying the format that the model must output. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured * Outputs which ensures the model will match your supplied JSON schema. Learn more @@ -1088,9 +1151,8 @@ export interface ChatCompletionCreateParamsBase { /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will * make the output more random, while lower values like 0.2 will make it more - * focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. 
*/ temperature?: number | null; @@ -1223,6 +1285,7 @@ export declare namespace Completions { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, type ChatCompletionMessage as ChatCompletionMessage, @@ -1231,6 +1294,7 @@ export declare namespace Completions { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 262bf75a2..c3be19402 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -13,6 +13,7 @@ export { type ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal, type ChatCompletionContentPartText, + type ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam, type ChatCompletionMessage, @@ -21,6 +22,7 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam, diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 0c320e028..44dd011aa 100644 --- 
a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -127,9 +127,8 @@ export interface FineTuningJob { finished_at: number | null; /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for - * more details. + * The hyperparameters used for the fine-tuning job. This value will only be + * returned when running `supervised` jobs. */ hyperparameters: FineTuningJob.Hyperparameters; @@ -195,6 +194,11 @@ export interface FineTuningJob { * A list of integrations to enable for this fine-tuning job. */ integrations?: Array | null; + + /** + * The method used for fine-tuning. + */ + method?: FineTuningJob.Method; } export namespace FineTuningJob { @@ -221,18 +225,125 @@ export namespace FineTuningJob { } /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for - * more details. + * The hyperparameters used for the fine-tuning job. This value will only be + * returned when running `supervised` jobs. */ export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + /** * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. "auto" decides the optimal number of epochs based - * on the size of the dataset. If setting the number manually, we support any - * number between 1 and 50 epochs. + * through the training dataset. + */ + n_epochs?: 'auto' | number; + } + + /** + * The method used for fine-tuning. + */ + export interface Method { + /** + * Configuration for the DPO fine-tuning method. 
+ */ + dpo?: Method.Dpo; + + /** + * Configuration for the supervised fine-tuning method. */ - n_epochs: 'auto' | number; + supervised?: Method.Supervised; + + /** + * The type of method. Is either `supervised` or `dpo`. + */ + type?: 'supervised' | 'dpo'; + } + + export namespace Method { + /** + * Configuration for the DPO fine-tuning method. + */ + export interface Dpo { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: Dpo.Hyperparameters; + } + + export namespace Dpo { + /** + * The hyperparameters used for the fine-tuning job. + */ + export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * The beta value for the DPO method. A higher beta value will increase the weight + * of the penalty between the policy and reference model. + */ + beta?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + } + } + + /** + * Configuration for the supervised fine-tuning method. + */ + export interface Supervised { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: Supervised.Hyperparameters; + } + + export namespace Supervised { + /** + * The hyperparameters used for the fine-tuning job. + */ + export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. 
+ */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + } + } } } @@ -240,15 +351,40 @@ export namespace FineTuningJob { * Fine-tuning job event object */ export interface FineTuningJobEvent { + /** + * The object identifier. + */ id: string; + /** + * The Unix timestamp (in seconds) for when the fine-tuning job was created. + */ created_at: number; + /** + * The log level of the event. + */ level: 'info' | 'warn' | 'error'; + /** + * The message of the event. + */ message: string; + /** + * The object type, which is always "fine_tuning.job.event". + */ object: 'fine_tuning.job.event'; + + /** + * The data associated with the event. + */ + data?: unknown; + + /** + * The type of event. + */ + type?: 'message' | 'metrics'; } export type FineTuningJobIntegration = FineTuningJobWandbIntegrationObject; @@ -318,8 +454,10 @@ export interface JobCreateParams { * your file with the purpose `fine-tune`. * * The contents of the file should differ depending on if the model uses the - * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + * format, or if the fine-tuning method uses the + * [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) * format. * * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) @@ -328,7 +466,8 @@ export interface JobCreateParams { training_file: string; /** - * The hyperparameters used for the fine-tuning job. + * The hyperparameters used for the fine-tuning job. This value is now deprecated + * in favor of `method`, and should be passed in under the `method` parameter. 
*/ hyperparameters?: JobCreateParams.Hyperparameters; @@ -337,6 +476,11 @@ export interface JobCreateParams { */ integrations?: Array | null; + /** + * The method used for fine-tuning. + */ + method?: JobCreateParams.Method; + /** * The seed controls the reproducibility of the job. Passing in the same seed and * job parameters should produce the same results, but may differ in rare cases. If @@ -372,7 +516,9 @@ export interface JobCreateParams { export namespace JobCreateParams { /** - * The hyperparameters used for the fine-tuning job. + * @deprecated: The hyperparameters used for the fine-tuning job. This value is now + * deprecated in favor of `method`, and should be passed in under the `method` + * parameter. */ export interface Hyperparameters { /** @@ -444,6 +590,104 @@ export namespace JobCreateParams { tags?: Array; } } + + /** + * The method used for fine-tuning. + */ + export interface Method { + /** + * Configuration for the DPO fine-tuning method. + */ + dpo?: Method.Dpo; + + /** + * Configuration for the supervised fine-tuning method. + */ + supervised?: Method.Supervised; + + /** + * The type of method. Is either `supervised` or `dpo`. + */ + type?: 'supervised' | 'dpo'; + } + + export namespace Method { + /** + * Configuration for the DPO fine-tuning method. + */ + export interface Dpo { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: Dpo.Hyperparameters; + } + + export namespace Dpo { + /** + * The hyperparameters used for the fine-tuning job. + */ + export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * The beta value for the DPO method. A higher beta value will increase the weight + * of the penalty between the policy and reference model. + */ + beta?: 'auto' | number; + + /** + * Scaling factor for the learning rate. 
A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + } + } + + /** + * Configuration for the supervised fine-tuning method. + */ + export interface Supervised { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: Supervised.Hyperparameters; + } + + export namespace Supervised { + /** + * The hyperparameters used for the fine-tuning job. + */ + export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. 
+ */ + n_epochs?: 'auto' | number; + } + } + } } export interface JobListParams extends CursorPageParams {} diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index 5dcbf9ad6..dfc09f69b 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -11,7 +11,7 @@ const client = new OpenAI({ describe('resource completions', () => { test('create: only required params', async () => { const responsePromise = client.chat.completions.create({ - messages: [{ content: 'string', role: 'system' }], + messages: [{ content: 'string', role: 'developer' }], model: 'gpt-4o', }); const rawResponse = await responsePromise.asResponse(); @@ -25,7 +25,7 @@ describe('resource completions', () => { test('create: required and optional params', async () => { const response = await client.chat.completions.create({ - messages: [{ content: 'string', role: 'system', name: 'name' }], + messages: [{ content: 'string', role: 'developer', name: 'name' }], model: 'gpt-4o', audio: { format: 'wav', voice: 'alloy' }, frequency_penalty: -2, @@ -41,6 +41,7 @@ describe('resource completions', () => { parallel_tool_calls: true, prediction: { content: 'string', type: 'content' }, presence_penalty: -2, + reasoning_effort: 'low', response_format: { type: 'text' }, seed: -9007199254740991, service_tier: 'auto', diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index 0ab09768a..4de83a8b7 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -34,6 +34,20 @@ describe('resource jobs', () => { wandb: { project: 'my-wandb-project', entity: 'entity', name: 'name', tags: ['custom-tag'] }, }, ], + method: { + dpo: { + hyperparameters: { + batch_size: 'auto', + beta: 'auto', + learning_rate_multiplier: 'auto', + n_epochs: 'auto', + }, + }, + supervised: { + hyperparameters: { 
batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' }, + }, + type: 'supervised', + }, seed: 42, suffix: 'x', validation_file: 'file-abc123', From 13fa61db5e3407be684868db488c46d95f6805bf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 18:05:03 +0000 Subject: [PATCH 351/533] chore(internal): spec update (#1230) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index d4d7d3c40..7b5235e3c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-779ea2754025daf5e18eb8ceb203ec321692636bc3a999338556a479178efa6c.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0d64ca9e45f51b4279f87b205eeb3a3576df98407698ce053f2e2302c1c08df1.yml From 21c3ed6db585433880c2462b3c62abcbe7ec6903 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 18:05:30 +0000 Subject: [PATCH 352/533] release: 4.77.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 52c31fe71..6b843f931 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.76.3" + ".": "4.77.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b6f57fe4..d33ce4c1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.77.0 (2024-12-17) + +Full Changelog: [v4.76.3...v4.77.0](https://github.com/openai/openai-node/compare/v4.76.3...v4.77.0) + +### Features + +* **api:** new o1 and GPT-4o models + preference fine-tuning ([#1229](https://github.com/openai/openai-node/issues/1229)) 
([2e872d4](https://github.com/openai/openai-node/commit/2e872d4ac3717ab8f61741efffb7a31acd798338)) + + +### Chores + +* **internal:** fix some typos ([#1227](https://github.com/openai/openai-node/issues/1227)) ([d51fcfe](https://github.com/openai/openai-node/commit/d51fcfe3a66550a684eeeb0e6f17e1d9825cdf78)) +* **internal:** spec update ([#1230](https://github.com/openai/openai-node/issues/1230)) ([ed2b61d](https://github.com/openai/openai-node/commit/ed2b61d32703b64d9f91223bc02627a607f60483)) + ## 4.76.3 (2024-12-13) Full Changelog: [v4.76.2...v4.76.3](https://github.com/openai/openai-node/compare/v4.76.2...v4.76.3) diff --git a/jsr.json b/jsr.json index ef9ce6848..d76a2040e 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.76.3", + "version": "4.77.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 47f363ba1..54633aa5d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.76.3", + "version": "4.77.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 01cd56405..fdf4e5224 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.76.3'; // x-release-please-version +export const VERSION = '4.77.0'; // x-release-please-version From d70f6e835be4cef980e8e4026ec709177d3d3931 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 22:16:46 +0000 Subject: [PATCH 353/533] chore(internal): spec update (#1231) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 7b5235e3c..248cc366d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0d64ca9e45f51b4279f87b205eeb3a3576df98407698ce053f2e2302c1c08df1.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-a39aca84ed97ebafb707ebd5221e2787c5a42ff3d98f2ffaea8a0dcd84cbcbcb.yml From 0f715f281715da744e01c2c08932008d0cfde614 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 22:07:12 +0000 Subject: [PATCH 354/533] fix(client): normalize method (#1235) --- src/core.ts | 12 +++++++++++- tests/index.test.ts | 13 +++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/core.ts b/src/core.ts index 68f1e676a..972cceaec 100644 --- a/src/core.ts +++ b/src/core.ts @@ -557,9 +557,19 @@ export abstract class APIClient { const timeout = setTimeout(() => controller.abort(), ms); + const fetchOptions = { + signal: controller.signal as any, + ...options, + }; + if (fetchOptions.method) { + // Custom methods like 'patch' need to be uppercased + // See https://github.com/nodejs/undici/issues/2294 + fetchOptions.method = fetchOptions.method.toUpperCase(); + } + return ( // use undefined this binding; fetch errors if bound to something else in browser/cloudflare - this.fetch.call(undefined, url, { signal: controller.signal as any, ...options }).finally(() => { + this.fetch.call(undefined, url, fetchOptions).finally(() => { clearTimeout(timeout); }) ); diff --git a/tests/index.test.ts b/tests/index.test.ts index bf113e7bb..a6f0040a4 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -122,6 +122,19 @@ describe('instantiate client', () => { expect(spy).toHaveBeenCalledTimes(1); }); + test('normalized method', async () => { + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + capturedRequest = init; + return new Response(JSON.stringify({}), { headers: { 'Content-Type': 'application/json' } 
}); + }; + + const client = new OpenAI({ baseURL: 'http://localhost:5000/', apiKey: 'My API Key', fetch: testFetch }); + + await client.patch('/foo'); + expect(capturedRequest?.method).toEqual('PATCH'); + }); + describe('baseUrl', () => { test('trailing slash', () => { const client = new OpenAI({ baseURL: 'http://localhost:5000/custom/path/', apiKey: 'My API Key' }); From 4df92af7cace12c2134fbfb3db1ed5887dec0a4c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 11:54:17 +0000 Subject: [PATCH 355/533] docs: minor formatting changes (#1236) --- CONTRIBUTING.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e8bbc1b07..dde09d52d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ ## Setting up the environment -This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install/#mac-stable). +This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install). Other package managers may work but are not officially supported for development. To set up the repository, run: @@ -29,10 +29,10 @@ All files in the `examples/` directory are not modified by the generator and can … ``` -``` -chmod +x examples/.ts +```sh +$ chmod +x examples/.ts # run the example against your api -yarn tsn -T examples/.ts +$ yarn tsn -T examples/.ts ``` ## Using the repository from source From b6e4d947c69d255cd332bf247a2faedf578438c4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 20 Dec 2024 21:28:36 +0000 Subject: [PATCH 356/533] docs(readme): add alpha callout --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index b03bcd870..c926688f0 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,9 @@ +> [!IMPORTANT] +> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. 
+> +> Please try it out and let us know if you run into any issues! +> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 + # OpenAI TypeScript and JavaScript API Library [![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) From 3fdc7d4f67a6ceea51723684dcc0bc1895088259 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Dec 2024 05:06:15 +0000 Subject: [PATCH 357/533] release: 4.77.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6b843f931..40491ea3b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.0" + ".": "4.77.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d33ce4c1a..e2ed8756c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.77.1 (2024-12-21) + +Full Changelog: [v4.77.0...v4.77.1](https://github.com/openai/openai-node/compare/v4.77.0...v4.77.1) + +### Bug Fixes + +* **client:** normalize method ([#1235](https://github.com/openai/openai-node/issues/1235)) ([4a213da](https://github.com/openai/openai-node/commit/4a213dad6f2104dc02a75724acc62134d25db472)) + + +### Chores + +* **internal:** spec update ([#1231](https://github.com/openai/openai-node/issues/1231)) ([a97ea73](https://github.com/openai/openai-node/commit/a97ea73cafcb56e94be7ff691c4022da575cf60e)) + + +### Documentation + +* minor formatting changes ([#1236](https://github.com/openai/openai-node/issues/1236)) 
([6387968](https://github.com/openai/openai-node/commit/63879681ccaca3dc1e17b27464e2f830b8f63b4f)) +* **readme:** add alpha callout ([f2eff37](https://github.com/openai/openai-node/commit/f2eff3780e1216f7f420f7b86d47f4e21986b10e)) + ## 4.77.0 (2024-12-17) Full Changelog: [v4.76.3...v4.77.0](https://github.com/openai/openai-node/compare/v4.76.3...v4.77.0) diff --git a/jsr.json b/jsr.json index d76a2040e..f80d0a575 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.0", + "version": "4.77.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 54633aa5d..44030acd3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.0", + "version": "4.77.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index fdf4e5224..a7b84d0c2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.0'; // x-release-please-version +export const VERSION = '4.77.1'; // x-release-please-version From c0ae6fc24957b1c3962b75f1d17c0d85ea2b298c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Jan 2025 01:44:21 +0000 Subject: [PATCH 358/533] chore: bump license year (#1246) --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 621a6becf..f011417af 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2024 OpenAI + Copyright 2025 OpenAI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
From 9acdf8a3c44a4cce785b9dfa9efd32f8c47fa6d5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 2 Jan 2025 05:06:49 +0000 Subject: [PATCH 359/533] release: 4.77.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 40491ea3b..c5f60a579 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.1" + ".": "4.77.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e2ed8756c..d5143492e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.77.2 (2025-01-02) + +Full Changelog: [v4.77.1...v4.77.2](https://github.com/openai/openai-node/compare/v4.77.1...v4.77.2) + +### Chores + +* bump license year ([#1246](https://github.com/openai/openai-node/issues/1246)) ([13197c1](https://github.com/openai/openai-node/commit/13197c1698f492529bd00b62d95f83c039ef0ac7)) + ## 4.77.1 (2024-12-21) Full Changelog: [v4.77.0...v4.77.1](https://github.com/openai/openai-node/compare/v4.77.0...v4.77.1) diff --git a/jsr.json b/jsr.json index f80d0a575..6e735f0b0 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.1", + "version": "4.77.2", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 44030acd3..947aad8d4 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.1", + "version": "4.77.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index a7b84d0c2..e1984f01e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.1'; // x-release-please-version +export const 
VERSION = '4.77.2'; // x-release-please-version From db16121d2db5a3104c2b9d85c6b7b3281f6f6299 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 22:57:05 +0000 Subject: [PATCH 360/533] chore(api): bump spec version (#1248) --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 248cc366d..d223c8f1f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-a39aca84ed97ebafb707ebd5221e2787c5a42ff3d98f2ffaea8a0dcd84cbcbcb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-02200a58ed631064b6419711da99fefd6e97bdbbeb577a80a1a6e0c8dbcb18f5.yml From f4066e1af907586946a5e6befee9459268425680 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 22:57:37 +0000 Subject: [PATCH 361/533] release: 4.77.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c5f60a579..e98ace9d7 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.2" + ".": "4.77.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d5143492e..1f928b366 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.77.3 (2025-01-03) + +Full Changelog: [v4.77.2...v4.77.3](https://github.com/openai/openai-node/compare/v4.77.2...v4.77.3) + +### Chores + +* **api:** bump spec version ([#1248](https://github.com/openai/openai-node/issues/1248)) ([37b3df9](https://github.com/openai/openai-node/commit/37b3df9ac6af76fea6eace8307aab9f0565e5660)) + ## 4.77.2 (2025-01-02) Full Changelog: 
[v4.77.1...v4.77.2](https://github.com/openai/openai-node/compare/v4.77.1...v4.77.2) diff --git a/jsr.json b/jsr.json index 6e735f0b0..57eb55bf8 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.2", + "version": "4.77.3", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 947aad8d4..2ad833206 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.2", + "version": "4.77.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e1984f01e..81ee8f0d6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.2'; // x-release-please-version +export const VERSION = '4.77.3'; // x-release-please-version From 017d6010d40e84ea390293a46485beefcc8d386c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:22:26 +0000 Subject: [PATCH 362/533] docs(readme): fix misplaced period (#1252) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c926688f0..3039857a1 100644 --- a/README.md +++ b/README.md @@ -631,7 +631,7 @@ await client.models.list({ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: 1. Changes that only affect static types, without breaking runtime behavior. -2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_. +2. Changes to library internals which are technically public but not intended or documented for external use. 
_(Please open a GitHub issue to let us know if you are relying on such internals.)_ 3. Changes that we do not expect to impact the vast majority of users in practice. We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. From bb6ac193b7d9d45155d7e7bc93d40ea0a79645cc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:23:01 +0000 Subject: [PATCH 363/533] release: 4.77.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e98ace9d7..e66c326a9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.3" + ".": "4.77.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f928b366..7a811f188 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.77.4 (2025-01-08) + +Full Changelog: [v4.77.3...v4.77.4](https://github.com/openai/openai-node/compare/v4.77.3...v4.77.4) + +### Documentation + +* **readme:** fix misplaced period ([#1252](https://github.com/openai/openai-node/issues/1252)) ([c2fe465](https://github.com/openai/openai-node/commit/c2fe46522d59d1611ba8bb2b7e070f9be7264df0)) + ## 4.77.3 (2025-01-03) Full Changelog: [v4.77.2...v4.77.3](https://github.com/openai/openai-node/compare/v4.77.2...v4.77.3) diff --git a/jsr.json b/jsr.json index 57eb55bf8..da442da31 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.3", + "version": "4.77.4", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 2ad833206..453859b6b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.3", + "version": "4.77.4", "description": "The official 
TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 81ee8f0d6..7f6adc9bc 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.3'; // x-release-please-version +export const VERSION = '4.77.4'; // x-release-please-version From d3736066a0a277ba544617cbf8d2ea057a9f0ecf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:58:16 +0000 Subject: [PATCH 364/533] feat(client): add realtime types (#1254) note this just defines types, there is no websocket interface provided yet --- .stats.yml | 4 +- api.md | 60 + src/resources/beta/beta.ts | 6 + src/resources/beta/index.ts | 1 + src/resources/beta/realtime/index.ts | 4 + src/resources/beta/realtime/realtime.ts | 1904 +++++++++++++++++ src/resources/beta/realtime/sessions.ts | 546 +++++ .../beta/realtime/sessions.test.ts | 45 + 8 files changed, 2568 insertions(+), 2 deletions(-) create mode 100644 src/resources/beta/realtime/index.ts create mode 100644 src/resources/beta/realtime/realtime.ts create mode 100644 src/resources/beta/realtime/sessions.ts create mode 100644 tests/api-resources/beta/realtime/sessions.test.ts diff --git a/.stats.yml b/.stats.yml index d223c8f1f..9600edae3 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-02200a58ed631064b6419711da99fefd6e97bdbbeb577a80a1a6e0c8dbcb18f5.yml +configured_endpoints: 69 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml diff --git a/api.md b/api.md index 54bcf08d7..a885628a3 100644 --- a/api.md +++ b/api.md @@ -213,6 +213,66 @@ Methods: # Beta +## Realtime + +Types: + +- ConversationCreatedEvent +- ConversationItem +- ConversationItemContent +- 
ConversationItemCreateEvent +- ConversationItemCreatedEvent +- ConversationItemDeleteEvent +- ConversationItemDeletedEvent +- ConversationItemInputAudioTranscriptionCompletedEvent +- ConversationItemInputAudioTranscriptionFailedEvent +- ConversationItemTruncateEvent +- ConversationItemTruncatedEvent +- ErrorEvent +- InputAudioBufferAppendEvent +- InputAudioBufferClearEvent +- InputAudioBufferClearedEvent +- InputAudioBufferCommitEvent +- InputAudioBufferCommittedEvent +- InputAudioBufferSpeechStartedEvent +- InputAudioBufferSpeechStoppedEvent +- RateLimitsUpdatedEvent +- RealtimeClientEvent +- RealtimeResponse +- RealtimeResponseStatus +- RealtimeResponseUsage +- RealtimeServerEvent +- ResponseAudioDeltaEvent +- ResponseAudioDoneEvent +- ResponseAudioTranscriptDeltaEvent +- ResponseAudioTranscriptDoneEvent +- ResponseCancelEvent +- ResponseContentPartAddedEvent +- ResponseContentPartDoneEvent +- ResponseCreateEvent +- ResponseCreatedEvent +- ResponseDoneEvent +- ResponseFunctionCallArgumentsDeltaEvent +- ResponseFunctionCallArgumentsDoneEvent +- ResponseOutputItemAddedEvent +- ResponseOutputItemDoneEvent +- ResponseTextDeltaEvent +- ResponseTextDoneEvent +- SessionCreatedEvent +- SessionUpdateEvent +- SessionUpdatedEvent + +### Sessions + +Types: + +- Session +- SessionCreateResponse + +Methods: + +- client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse + ## VectorStores Types: diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index b904abe4a..ccd043243 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -21,6 +21,8 @@ import { RunStreamEvent, ThreadStreamEvent, } from './assistants'; +import * as RealtimeAPI from './realtime/realtime'; +import { Realtime } from './realtime/realtime'; import * as ThreadsAPI from './threads/threads'; import { AssistantResponseFormatOption, @@ -58,12 +60,14 @@ import { import { Chat } from './chat/chat'; export class Beta extends APIResource { + realtime: 
RealtimeAPI.Realtime = new RealtimeAPI.Realtime(this._client); vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client); threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } +Beta.Realtime = Realtime; Beta.VectorStores = VectorStores; Beta.VectorStoresPage = VectorStoresPage; Beta.Assistants = Assistants; @@ -71,6 +75,8 @@ Beta.AssistantsPage = AssistantsPage; Beta.Threads = Threads; export declare namespace Beta { + export { Realtime as Realtime }; + export { VectorStores as VectorStores, type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index d7111288f..aa2e52d4c 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -19,6 +19,7 @@ export { type AssistantListParams, } from './assistants'; export { Beta } from './beta'; +export { Realtime } from './realtime/index'; export { Chat } from './chat/index'; export { Threads, diff --git a/src/resources/beta/realtime/index.ts b/src/resources/beta/realtime/index.ts new file mode 100644 index 000000000..66c3ecaae --- /dev/null +++ b/src/resources/beta/realtime/index.ts @@ -0,0 +1,4 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Realtime } from './realtime'; +export { Sessions, type Session, type SessionCreateResponse, type SessionCreateParams } from './sessions'; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts new file mode 100644 index 000000000..5de06917a --- /dev/null +++ b/src/resources/beta/realtime/realtime.ts @@ -0,0 +1,1904 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../../resource'; +import * as RealtimeAPI from './realtime'; +import * as SessionsAPI from './sessions'; +import { + Session as SessionsAPISession, + SessionCreateParams, + SessionCreateResponse, + Sessions, +} from './sessions'; + +export class Realtime extends APIResource { + sessions: SessionsAPI.Sessions = new SessionsAPI.Sessions(this._client); +} + +/** + * Returned when a conversation is created. Emitted right after session creation. + */ +export interface ConversationCreatedEvent { + /** + * The conversation resource. + */ + conversation: ConversationCreatedEvent.Conversation; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The event type, must be `conversation.created`. + */ + type: 'conversation.created'; +} + +export namespace ConversationCreatedEvent { + /** + * The conversation resource. + */ + export interface Conversation { + /** + * The unique ID of the conversation. + */ + id?: string; + + /** + * The object type, must be `realtime.conversation`. + */ + object?: 'realtime.conversation'; + } +} + +/** + * The item to add to the conversation. + */ +export interface ConversationItem { + /** + * The unique ID of the item, this can be generated by the client to help manage + * server-side context, but is not required because the server will generate one if + * not provided. + */ + id?: string; + + /** + * The arguments of the function call (for `function_call` items). + */ + arguments?: string; + + /** + * The ID of the function call (for `function_call` and `function_call_output` + * items). If passed on a `function_call_output` item, the server will check that a + * `function_call` item with the same ID exists in the conversation history. + */ + call_id?: string; + + /** + * The content of the message, applicable for `message` items. 
+ * + * - Message items of role `system` support only `input_text` content + * - Message items of role `user` support `input_text` and `input_audio` content + * - Message items of role `assistant` support `text` content. + */ + content?: Array; + + /** + * The name of the function being called (for `function_call` items). + */ + name?: string; + + /** + * Identifier for the API object being returned - always `realtime.item`. + */ + object?: 'realtime.item'; + + /** + * The output of the function call (for `function_call_output` items). + */ + output?: string; + + /** + * The role of the message sender (`user`, `assistant`, `system`), only applicable + * for `message` items. + */ + role?: 'user' | 'assistant' | 'system'; + + /** + * The status of the item (`completed`, `incomplete`). These have no effect on the + * conversation, but are accepted for consistency with the + * `conversation.item.created` event. + */ + status?: 'completed' | 'incomplete'; + + /** + * The type of the item (`message`, `function_call`, `function_call_output`). + */ + type?: 'message' | 'function_call' | 'function_call_output'; +} + +export interface ConversationItemContent { + /** + * ID of a previous conversation item to reference (for `item_reference` content + * types in `response.create` events). These can reference both client and server + * created items. + */ + id?: string; + + /** + * Base64-encoded audio bytes, used for `input_audio` content type. + */ + audio?: string; + + /** + * The text content, used for `input_text` and `text` content types. + */ + text?: string; + + /** + * The transcript of the audio, used for `input_audio` content type. + */ + transcript?: string; + + /** + * The content type (`input_text`, `input_audio`, `item_reference`, `text`). + */ + type?: 'input_text' | 'input_audio' | 'item_reference' | 'text'; +} + +/** + * Add a new Item to the Conversation's context, including messages, function + * calls, and function call responses. 
This event can be used both to populate a + * "history" of the conversation and to add new items mid-stream, but has the + * current limitation that it cannot populate assistant audio messages. + * + * If successful, the server will respond with a `conversation.item.created` event, + * otherwise an `error` event will be sent. + */ +export interface ConversationItemCreateEvent { + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The event type, must be `conversation.item.create`. + */ + type: 'conversation.item.create'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; + + /** + * The ID of the preceding item after which the new item will be inserted. If not + * set, the new item will be appended to the end of the conversation. If set, it + * allows an item to be inserted mid-conversation. If the ID cannot be found, an + * error will be returned and the item will not be added. + */ + previous_item_id?: string; +} + +/** + * Returned when a conversation item is created. There are several scenarios that + * produce this event: + * + * - The server is generating a Response, which if successful will produce either + * one or two Items, which will be of type `message` (role `assistant`) or type + * `function_call`. + * - The input audio buffer has been committed, either by the client or the server + * (in `server_vad` mode). The server will take the content of the input audio + * buffer and add it to a new user message Item. + * - The client has sent a `conversation.item.create` event to add a new Item to + * the Conversation. + */ +export interface ConversationItemCreatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The ID of the preceding item in the Conversation context, allows the client to + * understand the order of the conversation. 
+ */ + previous_item_id: string; + + /** + * The event type, must be `conversation.item.created`. + */ + type: 'conversation.item.created'; +} + +/** + * Send this event when you want to remove any item from the conversation history. + * The server will respond with a `conversation.item.deleted` event, unless the + * item does not exist in the conversation history, in which case the server will + * respond with an error. + */ +export interface ConversationItemDeleteEvent { + /** + * The ID of the item to delete. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.delete`. + */ + type: 'conversation.item.delete'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Returned when an item in the conversation is deleted by the client with a + * `conversation.item.delete` event. This event is used to synchronize the server's + * understanding of the conversation history with the client's view. + */ +export interface ConversationItemDeletedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item that was deleted. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.deleted`. + */ + type: 'conversation.item.deleted'; +} + +/** + * This event is the output of audio transcription for user audio written to the + * user audio buffer. Transcription begins when the input audio buffer is committed + * by the client or server (in `server_vad` mode). Transcription runs + * asynchronously with Response creation, so this event may come before or after + * the Response events. + * + * Realtime API models accept audio natively, and thus input transcription is a + * separate process run on a separate ASR (Automatic Speech Recognition) model, + * currently always `whisper-1`. Thus the transcript may diverge somewhat from the + * model's interpretation, and should be treated as a rough guide. 
+ */ +export interface ConversationItemInputAudioTranscriptionCompletedEvent { + /** + * The index of the content part containing the audio. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item containing the audio. + */ + item_id: string; + + /** + * The transcribed text. + */ + transcript: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.completed`. + */ + type: 'conversation.item.input_audio_transcription.completed'; +} + +/** + * Returned when input audio transcription is configured, and a transcription + * request for a user message failed. These events are separate from other `error` + * events so that the client can identify the related Item. + */ +export interface ConversationItemInputAudioTranscriptionFailedEvent { + /** + * The index of the content part containing the audio. + */ + content_index: number; + + /** + * Details of the transcription error. + */ + error: ConversationItemInputAudioTranscriptionFailedEvent.Error; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.failed`. + */ + type: 'conversation.item.input_audio_transcription.failed'; +} + +export namespace ConversationItemInputAudioTranscriptionFailedEvent { + /** + * Details of the transcription error. + */ + export interface Error { + /** + * Error code, if any. + */ + code?: string; + + /** + * A human-readable error message. + */ + message?: string; + + /** + * Parameter related to the error, if any. + */ + param?: string; + + /** + * The type of error. + */ + type?: string; + } +} + +/** + * Send this event to truncate a previous assistant message’s audio. 
The server + * will produce audio faster than realtime, so this event is useful when the user + * interrupts to truncate audio that has already been sent to the client but not + * yet played. This will synchronize the server's understanding of the audio with + * the client's playback. + * + * Truncating audio will delete the server-side text transcript to ensure there is + * not text in the context that hasn't been heard by the user. + * + * If successful, the server will respond with a `conversation.item.truncated` + * event. + */ +export interface ConversationItemTruncateEvent { + /** + * Inclusive duration up to which audio is truncated, in milliseconds. If the + * audio_end_ms is greater than the actual audio duration, the server will respond + * with an error. + */ + audio_end_ms: number; + + /** + * The index of the content part to truncate. Set this to 0. + */ + content_index: number; + + /** + * The ID of the assistant message item to truncate. Only assistant message items + * can be truncated. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.truncate`. + */ + type: 'conversation.item.truncate'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Returned when an earlier assistant audio message item is truncated by the client + * with a `conversation.item.truncate` event. This event is used to synchronize the + * server's understanding of the audio with the client's playback. + * + * This action will truncate the audio and remove the server-side text transcript + * to ensure there is no text in the context that hasn't been heard by the user. + */ +export interface ConversationItemTruncatedEvent { + /** + * The duration up to which the audio was truncated, in milliseconds. + */ + audio_end_ms: number; + + /** + * The index of the content part that was truncated. + */ + content_index: number; + + /** + * The unique ID of the server event. 
+ */ + event_id: string; + + /** + * The ID of the assistant message item that was truncated. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.truncated`. + */ + type: 'conversation.item.truncated'; +} + +/** + * Returned when an error occurs, which could be a client problem or a server + * problem. Most errors are recoverable and the session will stay open, we + * recommend to implementors to monitor and log error messages by default. + */ +export interface ErrorEvent { + /** + * Details of the error. + */ + error: ErrorEvent.Error; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The event type, must be `error`. + */ + type: 'error'; +} + +export namespace ErrorEvent { + /** + * Details of the error. + */ + export interface Error { + /** + * A human-readable error message. + */ + message: string; + + /** + * The type of error (e.g., "invalid_request_error", "server_error"). + */ + type: string; + + /** + * Error code, if any. + */ + code?: string | null; + + /** + * The event_id of the client event that caused the error, if applicable. + */ + event_id?: string | null; + + /** + * Parameter related to the error, if any. + */ + param?: string | null; + } +} + +/** + * Send this event to append audio bytes to the input audio buffer. The audio + * buffer is temporary storage you can write to and later commit. In Server VAD + * mode, the audio buffer is used to detect speech and the server will decide when + * to commit. When Server VAD is disabled, you must commit the audio buffer + * manually. + * + * The client may choose how much audio to place in each event up to a maximum of + * 15 MiB, for example streaming smaller chunks from the client may allow the VAD + * to be more responsive. Unlike made other client events, the server will not send + * a confirmation response to this event. + */ +export interface InputAudioBufferAppendEvent { + /** + * Base64-encoded audio bytes. 
This must be in the format specified by the + * `input_audio_format` field in the session configuration. + */ + audio: string; + + /** + * The event type, must be `input_audio_buffer.append`. + */ + type: 'input_audio_buffer.append'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Send this event to clear the audio bytes in the buffer. The server will respond + * with an `input_audio_buffer.cleared` event. + */ +export interface InputAudioBufferClearEvent { + /** + * The event type, must be `input_audio_buffer.clear`. + */ + type: 'input_audio_buffer.clear'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Returned when the input audio buffer is cleared by the client with a + * `input_audio_buffer.clear` event. + */ +export interface InputAudioBufferClearedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The event type, must be `input_audio_buffer.cleared`. + */ + type: 'input_audio_buffer.cleared'; +} + +/** + * Send this event to commit the user input audio buffer, which will create a new + * user message item in the conversation. This event will produce an error if the + * input audio buffer is empty. When in Server VAD mode, the client does not need + * to send this event, the server will commit the audio buffer automatically. + * + * Committing the input audio buffer will trigger input audio transcription (if + * enabled in session configuration), but it will not create a response from the + * model. The server will respond with an `input_audio_buffer.committed` event. + */ +export interface InputAudioBufferCommitEvent { + /** + * The event type, must be `input_audio_buffer.commit`. + */ + type: 'input_audio_buffer.commit'; + + /** + * Optional client-generated ID used to identify this event. 
+ */ + event_id?: string; +} + +/** + * Returned when an input audio buffer is committed, either by the client or + * automatically in server VAD mode. The `item_id` property is the ID of the user + * message item that will be created, thus a `conversation.item.created` event will + * also be sent to the client. + */ +export interface InputAudioBufferCommittedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item that will be created. + */ + item_id: string; + + /** + * The ID of the preceding item after which the new item will be inserted. + */ + previous_item_id: string; + + /** + * The event type, must be `input_audio_buffer.committed`. + */ + type: 'input_audio_buffer.committed'; +} + +/** + * Sent by the server when in `server_vad` mode to indicate that speech has been + * detected in the audio buffer. This can happen any time audio is added to the + * buffer (unless speech is already detected). The client may want to use this + * event to interrupt audio playback or provide visual feedback to the user. + * + * The client should expect to receive a `input_audio_buffer.speech_stopped` event + * when speech stops. The `item_id` property is the ID of the user message item + * that will be created when speech stops and will also be included in the + * `input_audio_buffer.speech_stopped` event (unless the client manually commits + * the audio buffer during VAD activation). + */ +export interface InputAudioBufferSpeechStartedEvent { + /** + * Milliseconds from the start of all audio written to the buffer during the + * session when speech was first detected. This will correspond to the beginning of + * audio sent to the model, and thus includes the `prefix_padding_ms` configured in + * the Session. + */ + audio_start_ms: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item that will be created when speech stops. 
+ */ + item_id: string; + + /** + * The event type, must be `input_audio_buffer.speech_started`. + */ + type: 'input_audio_buffer.speech_started'; +} + +/** + * Returned in `server_vad` mode when the server detects the end of speech in the + * audio buffer. The server will also send an `conversation.item.created` event + * with the user message item that is created from the audio buffer. + */ +export interface InputAudioBufferSpeechStoppedEvent { + /** + * Milliseconds since the session started when speech stopped. This will correspond + * to the end of audio sent to the model, and thus includes the + * `min_silence_duration_ms` configured in the Session. + */ + audio_end_ms: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item that will be created. + */ + item_id: string; + + /** + * The event type, must be `input_audio_buffer.speech_stopped`. + */ + type: 'input_audio_buffer.speech_stopped'; +} + +/** + * Emitted at the beginning of a Response to indicate the updated rate limits. When + * a Response is created some tokens will be "reserved" for the output tokens, the + * rate limits shown here reflect that reservation, which is then adjusted + * accordingly once the Response is completed. + */ +export interface RateLimitsUpdatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * List of rate limit information. + */ + rate_limits: Array; + + /** + * The event type, must be `rate_limits.updated`. + */ + type: 'rate_limits.updated'; +} + +export namespace RateLimitsUpdatedEvent { + export interface RateLimit { + /** + * The maximum allowed value for the rate limit. + */ + limit?: number; + + /** + * The name of the rate limit (`requests`, `tokens`). + */ + name?: 'requests' | 'tokens'; + + /** + * The remaining value before the limit is reached. + */ + remaining?: number; + + /** + * Seconds until the rate limit resets. 
+ */ + reset_seconds?: number; + } +} + +/** + * All events that the client can send to the Realtime API + */ +export type RealtimeClientEvent = + | SessionUpdateEvent + | InputAudioBufferAppendEvent + | InputAudioBufferCommitEvent + | InputAudioBufferClearEvent + | ConversationItemCreateEvent + | ConversationItemTruncateEvent + | ConversationItemDeleteEvent + | ResponseCreateEvent + | ResponseCancelEvent; + +/** + * The response resource. + */ +export interface RealtimeResponse { + /** + * The unique ID of the response. + */ + id?: string; + + /** + * Developer-provided string key-value pairs associated with this response. + */ + metadata?: unknown | null; + + /** + * The object type, must be `realtime.response`. + */ + object?: 'realtime.response'; + + /** + * The list of output items generated by the response. + */ + output?: Array; + + /** + * The final status of the response (`completed`, `cancelled`, `failed`, or + * `incomplete`). + */ + status?: 'completed' | 'cancelled' | 'failed' | 'incomplete'; + + /** + * Additional details about the status. + */ + status_details?: RealtimeResponseStatus; + + /** + * Usage statistics for the Response, this will correspond to billing. A Realtime + * API session will maintain a conversation context and append new Items to the + * Conversation, thus output from previous turns (text and audio tokens) will + * become the input for later turns. + */ + usage?: RealtimeResponseUsage; +} + +/** + * Additional details about the status. + */ +export interface RealtimeResponseStatus { + /** + * A description of the error that caused the response to fail, populated when the + * `status` is `failed`. + */ + error?: RealtimeResponseStatus.Error; + + /** + * The reason the Response did not complete. For a `cancelled` Response, one of + * `turn_detected` (the server VAD detected a new start of speech) or + * `client_cancelled` (the client sent a cancel event). 
For an `incomplete` + * Response, one of `max_output_tokens` or `content_filter` (the server-side safety + * filter activated and cut off the response). + */ + reason?: 'turn_detected' | 'client_cancelled' | 'max_output_tokens' | 'content_filter'; + + /** + * The type of error that caused the response to fail, corresponding with the + * `status` field (`completed`, `cancelled`, `incomplete`, `failed`). + */ + type?: 'completed' | 'cancelled' | 'incomplete' | 'failed'; +} + +export namespace RealtimeResponseStatus { + /** + * A description of the error that caused the response to fail, populated when the + * `status` is `failed`. + */ + export interface Error { + /** + * Error code, if any. + */ + code?: string; + + /** + * The type of error. + */ + type?: string; + } +} + +/** + * Usage statistics for the Response, this will correspond to billing. A Realtime + * API session will maintain a conversation context and append new Items to the + * Conversation, thus output from previous turns (text and audio tokens) will + * become the input for later turns. + */ +export interface RealtimeResponseUsage { + /** + * Details about the input tokens used in the Response. + */ + input_token_details?: RealtimeResponseUsage.InputTokenDetails; + + /** + * The number of input tokens used in the Response, including text and audio + * tokens. + */ + input_tokens?: number; + + /** + * Details about the output tokens used in the Response. + */ + output_token_details?: RealtimeResponseUsage.OutputTokenDetails; + + /** + * The number of output tokens sent in the Response, including text and audio + * tokens. + */ + output_tokens?: number; + + /** + * The total number of tokens in the Response including input and output text and + * audio tokens. + */ + total_tokens?: number; +} + +export namespace RealtimeResponseUsage { + /** + * Details about the input tokens used in the Response. + */ + export interface InputTokenDetails { + /** + * The number of audio tokens used in the Response. 
+ */ + audio_tokens?: number; + + /** + * The number of cached tokens used in the Response. + */ + cached_tokens?: number; + + /** + * The number of text tokens used in the Response. + */ + text_tokens?: number; + } + + /** + * Details about the output tokens used in the Response. + */ + export interface OutputTokenDetails { + /** + * The number of audio tokens used in the Response. + */ + audio_tokens?: number; + + /** + * The number of text tokens used in the Response. + */ + text_tokens?: number; + } +} + +/** + * All events that the Realtime API can send back + */ +export type RealtimeServerEvent = + | ErrorEvent + | SessionCreatedEvent + | SessionUpdatedEvent + | ConversationCreatedEvent + | InputAudioBufferCommittedEvent + | InputAudioBufferClearedEvent + | InputAudioBufferSpeechStartedEvent + | InputAudioBufferSpeechStoppedEvent + | ConversationItemCreatedEvent + | ConversationItemInputAudioTranscriptionCompletedEvent + | ConversationItemInputAudioTranscriptionFailedEvent + | ConversationItemTruncatedEvent + | ConversationItemDeletedEvent + | ResponseCreatedEvent + | ResponseDoneEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | RateLimitsUpdatedEvent; + +/** + * Returned when the model-generated audio is updated. + */ +export interface ResponseAudioDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * Base64-encoded audio data delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. 
+ */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio.delta`. + */ + type: 'response.audio.delta'; +} + +/** + * Returned when the model-generated audio is done. Also emitted when a Response is + * interrupted, incomplete, or cancelled. + */ +export interface ResponseAudioDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio.done`. + */ + type: 'response.audio.done'; +} + +/** + * Returned when the model-generated transcription of audio output is updated. + */ +export interface ResponseAudioTranscriptDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The transcript delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio_transcript.delta`. + */ + type: 'response.audio_transcript.delta'; +} + +/** + * Returned when the model-generated transcription of audio output is done + * streaming. Also emitted when a Response is interrupted, incomplete, or + * cancelled. + */ +export interface ResponseAudioTranscriptDoneEvent { + /** + * The index of the content part in the item's content array. 
+ */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The final transcript of the audio. + */ + transcript: string; + + /** + * The event type, must be `response.audio_transcript.done`. + */ + type: 'response.audio_transcript.done'; +} + +/** + * Send this event to cancel an in-progress response. The server will respond with + * a `response.cancelled` event or an error if there is no response to cancel. + */ +export interface ResponseCancelEvent { + /** + * The event type, must be `response.cancel`. + */ + type: 'response.cancel'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; + + /** + * A specific response ID to cancel - if not provided, will cancel an in-progress + * response in the default conversation. + */ + response_id?: string; +} + +/** + * Returned when a new content part is added to an assistant message item during + * response generation. + */ +export interface ResponseContentPartAddedEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item to which the content part was added. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The content part that was added. + */ + part: ResponseContentPartAddedEvent.Part; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.content_part.added`. + */ + type: 'response.content_part.added'; +} + +export namespace ResponseContentPartAddedEvent { + /** + * The content part that was added. 
+ */ + export interface Part { + /** + * Base64-encoded audio data (if type is "audio"). + */ + audio?: string; + + /** + * The text content (if type is "text"). + */ + text?: string; + + /** + * The transcript of the audio (if type is "audio"). + */ + transcript?: string; + + /** + * The content type ("text", "audio"). + */ + type?: 'text' | 'audio'; + } +} + +/** + * Returned when a content part is done streaming in an assistant message item. + * Also emitted when a Response is interrupted, incomplete, or cancelled. + */ +export interface ResponseContentPartDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The content part that is done. + */ + part: ResponseContentPartDoneEvent.Part; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.content_part.done`. + */ + type: 'response.content_part.done'; +} + +export namespace ResponseContentPartDoneEvent { + /** + * The content part that is done. + */ + export interface Part { + /** + * Base64-encoded audio data (if type is "audio"). + */ + audio?: string; + + /** + * The text content (if type is "text"). + */ + text?: string; + + /** + * The transcript of the audio (if type is "audio"). + */ + transcript?: string; + + /** + * The content type ("text", "audio"). + */ + type?: 'text' | 'audio'; + } +} + +/** + * This event instructs the server to create a Response, which means triggering + * model inference. When in Server VAD mode, the server will create Responses + * automatically. + * + * A Response will include at least one Item, and may have two, in which case the + * second will be a function call. These Items will be appended to the conversation + * history. 
+ * + * The server will respond with a `response.created` event, events for Items and + * content created, and finally a `response.done` event to indicate the Response is + * complete. + * + * The `response.create` event includes inference configuration like + * `instructions`, and `temperature`. These fields will override the Session's + * configuration for this Response only. + */ +export interface ResponseCreateEvent { + /** + * The event type, must be `response.create`. + */ + type: 'response.create'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; + + /** + * Create a new Realtime response with these parameters + */ + response?: ResponseCreateEvent.Response; +} + +export namespace ResponseCreateEvent { + /** + * Create a new Realtime response with these parameters + */ + export interface Response { + /** + * Controls which conversation the response is added to. Currently supports `auto` + * and `none`, with `auto` as the default value. The `auto` value means that the + * contents of the response will be added to the default conversation. Set this to + * `none` to create an out-of-band response which will not add items to default + * conversation. + */ + conversation?: (string & {}) | 'auto' | 'none'; + + /** + * Input items to include in the prompt for the model. Creates a new context for + * this response, without including the default conversation. Can include + * references to items from the default conversation. + */ + input?: Array; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). 
The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. Keys + * can be a maximum of 64 characters long and values can be a maximum of 512 + * characters long. + */ + metadata?: unknown | null; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function, like `{"type": "function", "function": {"name": "my_function"}}`. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. 
+ */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + } + + export namespace Response { + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + } +} + +/** + * Returned when a new Response is created. The first event of response creation, + * where the response is in an initial state of `in_progress`. + */ +export interface ResponseCreatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The response resource. + */ + response: RealtimeResponse; + + /** + * The event type, must be `response.created`. + */ + type: 'response.created'; +} + +/** + * Returned when a Response is done streaming. Always emitted, no matter the final + * state. The Response object included in the `response.done` event will include + * all output Items in the Response but will omit the raw audio data. + */ +export interface ResponseDoneEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The response resource. + */ + response: RealtimeResponse; + + /** + * The event type, must be `response.done`. + */ + type: 'response.done'; +} + +/** + * Returned when the model-generated function call arguments are updated. + */ +export interface ResponseFunctionCallArgumentsDeltaEvent { + /** + * The ID of the function call. + */ + call_id: string; + + /** + * The arguments delta as a JSON string. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the function call item. 
+ */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.function_call_arguments.delta`. + */ + type: 'response.function_call_arguments.delta'; +} + +/** + * Returned when the model-generated function call arguments are done streaming. + * Also emitted when a Response is interrupted, incomplete, or cancelled. + */ +export interface ResponseFunctionCallArgumentsDoneEvent { + /** + * The final arguments as a JSON string. + */ + arguments: string; + + /** + * The ID of the function call. + */ + call_id: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the function call item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.function_call_arguments.done`. + */ + type: 'response.function_call_arguments.done'; +} + +/** + * Returned when a new Item is created during Response generation. + */ +export interface ResponseOutputItemAddedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The index of the output item in the Response. + */ + output_index: number; + + /** + * The ID of the Response to which the item belongs. + */ + response_id: string; + + /** + * The event type, must be `response.output_item.added`. + */ + type: 'response.output_item.added'; +} + +/** + * Returned when an Item is done streaming. Also emitted when a Response is + * interrupted, incomplete, or cancelled. + */ +export interface ResponseOutputItemDoneEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. 
+ */ + item: ConversationItem; + + /** + * The index of the output item in the Response. + */ + output_index: number; + + /** + * The ID of the Response to which the item belongs. + */ + response_id: string; + + /** + * The event type, must be `response.output_item.done`. + */ + type: 'response.output_item.done'; +} + +/** + * Returned when the text value of a "text" content part is updated. + */ +export interface ResponseTextDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The text delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.text.delta`. + */ + type: 'response.text.delta'; +} + +/** + * Returned when the text value of a "text" content part is done streaming. Also + * emitted when a Response is interrupted, incomplete, or cancelled. + */ +export interface ResponseTextDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The final text content. + */ + text: string; + + /** + * The event type, must be `response.text.done`. + */ + type: 'response.text.done'; +} + +/** + * Returned when a Session is created. Emitted automatically when a new connection + * is established as the first server event. This event will contain the default + * Session configuration. + */ +export interface SessionCreatedEvent { + /** + * The unique ID of the server event. 
+ */ + event_id: string; + + /** + * Realtime session object configuration. + */ + session: SessionsAPI.Session; + + /** + * The event type, must be `session.created`. + */ + type: 'session.created'; +} + +/** + * Send this event to update the session’s default configuration. The client may + * send this event at any time to update the session configuration, and any field + * may be updated at any time, except for "voice". The server will respond with a + * `session.updated` event that shows the full effective configuration. Only fields + * that are present are updated, thus the correct way to clear a field like + * "instructions" is to pass an empty string. + */ +export interface SessionUpdateEvent { + /** + * Realtime session object configuration. + */ + session: SessionUpdateEvent.Session; + + /** + * The event type, must be `session.update`. + */ + type: 'session.update'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +export namespace SessionUpdateEvent { + /** + * Realtime session object configuration. + */ + export interface Session { + /** + * The Realtime model used for this session. + */ + model: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. 
+ */ + input_audio_transcription?: Session.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. 
Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: Session.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + } + + export namespace Session { + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when VAD is enabled. `true` + * by default. 
+ */ + create_response?: boolean; + + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } + } +} + +/** + * Returned when a session is updated with a `session.update` event, unless there + * is an error. + */ +export interface SessionUpdatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * Realtime session object configuration. + */ + session: SessionsAPI.Session; + + /** + * The event type, must be `session.updated`. + */ + type: 'session.updated'; +} + +Realtime.Sessions = Sessions; + +export declare namespace Realtime { + export { + Sessions as Sessions, + type SessionsAPISession as Session, + type SessionCreateResponse as SessionCreateResponse, + type SessionCreateParams as SessionCreateParams, + }; +} diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts new file mode 100644 index 000000000..c1082d236 --- /dev/null +++ b/src/resources/beta/realtime/sessions.ts @@ -0,0 +1,546 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; + +export class Sessions extends APIResource { + /** + * Create an ephemeral API token for use in client-side applications with the + * Realtime API. 
Can be configured with the same session parameters as the + * `session.update` client event. + * + * It responds with a session object, plus a `client_secret` key which contains a + * usable ephemeral API token that can be used to authenticate browser clients for + * the Realtime API. + */ + create(body: SessionCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/realtime/sessions', { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } +} + +/** + * Realtime session object configuration. + */ +export interface Session { + /** + * Unique identifier for the session object. + */ + id?: string; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + input_audio_transcription?: Session.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. 
+ * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The Realtime model used for this session. + */ + model?: + | (string & {}) + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: Session.TurnDetection | null; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. 
+ */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; +} + +export namespace Session { + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. 
+ */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: 'server_vad'; + } +} + +/** + * A new Realtime session configuration, with an ephermeral key. Default TTL for + * keys is one minute. + */ +export interface SessionCreateResponse { + /** + * Ephemeral key returned by the API. + */ + client_secret?: SessionCreateResponse.ClientSecret; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: string; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + input_audio_transcription?: SessionCreateResponse.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. 
Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: string; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: SessionCreateResponse.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; +} + +export namespace SessionCreateResponse { + /** + * Ephemeral key returned by the API. + */ + export interface ClientSecret { + /** + * Timestamp for when the token expires. Currently, all tokens expire after one + * minute. + */ + expires_at?: number; + + /** + * Ephemeral key usable in client environments to authenticate connections to the + * Realtime API. Use this in client-side environments rather than a standard API + * token, which should only be used server-side. + */ + value?: string; + } + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. 
Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export interface SessionCreateParams { + /** + * The Realtime model used for this session. 
+ */ + model: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + input_audio_transcription?: SessionCreateParams.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. 
To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: SessionCreateParams.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; +} + +export namespace SessionCreateParams { + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). 
+ */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when VAD is enabled. `true` + * by default. + */ + create_response?: boolean; + + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export declare namespace Sessions { + export { + type Session as Session, + type SessionCreateResponse as SessionCreateResponse, + type SessionCreateParams as SessionCreateParams, + }; +} diff --git a/tests/api-resources/beta/realtime/sessions.test.ts b/tests/api-resources/beta/realtime/sessions.test.ts new file mode 100644 index 000000000..0ed998c27 --- /dev/null +++ b/tests/api-resources/beta/realtime/sessions.test.ts @@ -0,0 +1,45 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource sessions', () => { + test('create: only required params', async () => { + const responsePromise = client.beta.realtime.sessions.create({ model: 'gpt-4o-realtime-preview' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.beta.realtime.sessions.create({ + model: 'gpt-4o-realtime-preview', + input_audio_format: 'pcm16', + input_audio_transcription: { model: 'model' }, + instructions: 'instructions', + max_response_output_tokens: 0, + modalities: ['text'], + output_audio_format: 'pcm16', + temperature: 0, + tool_choice: 'tool_choice', + tools: [{ description: 'description', name: 'name', parameters: {}, type: 'function' }], + turn_detection: { + create_response: true, + prefix_padding_ms: 0, + silence_duration_ms: 0, + threshold: 0, + type: 'type', + }, + voice: 'alloy', + }); + }); +}); From 66c9715482827f7f28f5b6b8592185ae338b5379 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 9 Jan 2025 05:07:17 +0000 Subject: [PATCH 365/533] release: 4.78.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e66c326a9..9785f7c4a 100644 --- a/.release-please-manifest.json 
+++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.77.4" + ".": "4.78.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a811f188..fbc82e722 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.78.0 (2025-01-09) + +Full Changelog: [v4.77.4...v4.78.0](https://github.com/openai/openai-node/compare/v4.77.4...v4.78.0) + +### Features + +* **client:** add realtime types ([#1254](https://github.com/openai/openai-node/issues/1254)) ([7130995](https://github.com/openai/openai-node/commit/71309957a9a0883cac84b8b57697b796a9df3503)) + ## 4.77.4 (2025-01-08) Full Changelog: [v4.77.3...v4.77.4](https://github.com/openai/openai-node/compare/v4.77.3...v4.77.4) diff --git a/jsr.json b/jsr.json index da442da31..e26f2d5d8 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.77.4", + "version": "4.78.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 453859b6b..ab06be9cf 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.77.4", + "version": "4.78.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 7f6adc9bc..7ab855b86 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.77.4'; // x-release-please-version +export const VERSION = '4.78.0'; // x-release-please-version From 6070d964f6d62789f7deb670daa49f3c4f0a6f40 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 15:56:22 +0000 Subject: [PATCH 366/533] fix: send correct Accept header for certain endpoints (#1257) --- src/resources/audio/speech.ts | 7 ++++++- src/resources/files.ts | 11 ++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 
1cda80f79..bd2ed9f65 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -9,7 +9,12 @@ export class Speech extends APIResource { * Generates audio from the input text. */ create(body: SpeechCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/audio/speech', { body, ...options, __binaryResponse: true }); + return this._client.post('/audio/speech', { + body, + ...options, + headers: { Accept: 'application/octet-stream', ...options?.headers }, + __binaryResponse: true, + }); } } diff --git a/src/resources/files.ts b/src/resources/files.ts index 42a7bdfba..43708310b 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -69,7 +69,11 @@ export class Files extends APIResource { * Returns the contents of the specified file. */ content(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/files/${fileId}/content`, { ...options, __binaryResponse: true }); + return this._client.get(`/files/${fileId}/content`, { + ...options, + headers: { Accept: 'application/binary', ...options?.headers }, + __binaryResponse: true, + }); } /** @@ -78,10 +82,7 @@ export class Files extends APIResource { * @deprecated The `.content()` method should be used instead */ retrieveContent(fileId: string, options?: Core.RequestOptions): Core.APIPromise { - return this._client.get(`/files/${fileId}/content`, { - ...options, - headers: { Accept: 'application/json', ...options?.headers }, - }); + return this._client.get(`/files/${fileId}/content`, options); } /** From 14784f95797d4d525dafecfd4ec9c7a133540da0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 15:56:57 +0000 Subject: [PATCH 367/533] release: 4.78.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git 
a/.release-please-manifest.json b/.release-please-manifest.json index 9785f7c4a..3218ab333 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.78.0" + ".": "4.78.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index fbc82e722..320d00140 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.78.1 (2025-01-10) + +Full Changelog: [v4.78.0...v4.78.1](https://github.com/openai/openai-node/compare/v4.78.0...v4.78.1) + +### Bug Fixes + +* send correct Accept header for certain endpoints ([#1257](https://github.com/openai/openai-node/issues/1257)) ([8756693](https://github.com/openai/openai-node/commit/8756693c5690b16045cdd8d33636fe7643d45f3a)) + ## 4.78.0 (2025-01-09) Full Changelog: [v4.77.4...v4.78.0](https://github.com/openai/openai-node/compare/v4.77.4...v4.78.0) diff --git a/jsr.json b/jsr.json index e26f2d5d8..257faa02d 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.78.0", + "version": "4.78.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index ab06be9cf..ff6ec16bc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.78.0", + "version": "4.78.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 7ab855b86..a8ac58ba2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.78.0'; // x-release-please-version +export const VERSION = '4.78.1'; // x-release-please-version From b08a846a9aae3686574527fa2a8d91bb0e6c7aaf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:19:51 +0000 Subject: [PATCH 368/533] chore(internal): streaming refactors (#1261) --- src/streaming.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) 
diff --git a/src/streaming.ts b/src/streaming.ts index 2891e6ac3..da633f7fd 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -38,9 +38,7 @@ export class Stream implements AsyncIterable { if (sse.data.startsWith('[DONE]')) { done = true; continue; - } - - if (sse.event === null) { + } else { let data; try { From 55f084dfcae4229075ad7ebc33fff2ef4cd095e5 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 13 Jan 2025 16:28:31 +0000 Subject: [PATCH 369/533] chore: fix streaming --- src/streaming.ts | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/src/streaming.ts b/src/streaming.ts index da633f7fd..9cfd18176 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -52,22 +52,12 @@ export class Stream implements AsyncIterable { if (data && data.error) { throw new APIError(undefined, data.error, undefined, undefined); } - - yield data; - } else { - let data; - try { - data = JSON.parse(sse.data); - } catch (e) { - console.error(`Could not parse message into JSON:`, sse.data); - console.error(`From chunk:`, sse.raw); - throw e; - } // TODO: Is this where the error should be thrown? 
if (sse.event == 'error') { throw new APIError(undefined, data.error, data.message, undefined); } - yield { event: sse.event, data: data } as any; + + yield data; } } done = true; From 620ecd506fbf379018cf8f7a7fe92253ac49c9af Mon Sep 17 00:00:00 2001 From: Minh-Anh Phan <111523473+minhanh-phan@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:42:15 -0800 Subject: [PATCH 370/533] fix(logs/azure): redact sensitive header when DEBUG is set (#1218) --- src/core.ts | 36 ++++++++++++++++- tests/index.test.ts | 94 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 128 insertions(+), 2 deletions(-) diff --git a/src/core.ts b/src/core.ts index 972cceaec..3d2d029a5 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1148,9 +1148,43 @@ function applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void { } } +const SENSITIVE_HEADERS = new Set(['authorization', 'api-key']); + export function debug(action: string, ...args: any[]) { if (typeof process !== 'undefined' && process?.env?.['DEBUG'] === 'true') { - console.log(`OpenAI:DEBUG:${action}`, ...args); + const modifiedArgs = args.map((arg) => { + if (!arg) { + return arg; + } + + // Check for sensitive headers in request body 'headers' object + if (arg['headers']) { + // clone so we don't mutate + const modifiedArg = { ...arg, headers: { ...arg['headers'] } }; + + for (const header in arg['headers']) { + if (SENSITIVE_HEADERS.has(header.toLowerCase())) { + modifiedArg['headers'][header] = 'REDACTED'; + } + } + + return modifiedArg; + } + + let modifiedArg = null; + + // Check for sensitive headers in headers object + for (const header in arg) { + if (SENSITIVE_HEADERS.has(header.toLowerCase())) { + // avoid making a copy until we need to + modifiedArg ??= { ...arg }; + modifiedArg[header] = 'REDACTED'; + } + } + + return modifiedArg ?? 
arg; + }); + console.log(`OpenAI:DEBUG:${action}`, ...modifiedArgs); } } diff --git a/tests/index.test.ts b/tests/index.test.ts index a6f0040a4..016d525f5 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -2,7 +2,7 @@ import OpenAI from 'openai'; import { APIUserAbortError } from 'openai'; -import { Headers } from 'openai/core'; +import { debug, Headers } from 'openai/core'; import defaultFetch, { Response, type RequestInit, type RequestInfo } from 'node-fetch'; describe('instantiate client', () => { @@ -424,3 +424,95 @@ describe('retries', () => { expect(count).toEqual(3); }); }); + +describe('debug()', () => { + const env = process.env; + const spy = jest.spyOn(console, 'log'); + + beforeEach(() => { + jest.resetModules(); + process.env = { ...env }; + process.env['DEBUG'] = 'true'; + }); + + afterEach(() => { + process.env = env; + }); + + test('body request object with Authorization header', function () { + // Test request body includes headers object with Authorization + const headersTest = { + headers: { + Authorization: 'fakeAuthorization', + }, + }; + debug('request', headersTest); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + headers: { + Authorization: 'REDACTED', + }, + }); + }); + + test('body request object with api-key header', function () { + // Test request body includes headers object with api-ley + const apiKeyTest = { + headers: { + 'api-key': 'fakeKey', + }, + }; + debug('request', apiKeyTest); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + headers: { + 'api-key': 'REDACTED', + }, + }); + }); + + test('header object with Authorization header', function () { + // Test headers object with authorization header + const authorizationTest = { + authorization: 'fakeValue', + }; + debug('request', authorizationTest); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); + + test('input args are not mutated', function () { + const authorizationTest = { + 
authorization: 'fakeValue', + }; + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + defaultHeaders: authorizationTest, + apiKey: 'api-key', + }); + + const { req } = client.buildRequest({ path: '/foo', method: 'post' }); + debug('request', authorizationTest); + expect((req.headers as Headers)['authorization']).toEqual('fakeValue'); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); + + test('input headers are not mutated', function () { + const authorizationTest = { + authorization: 'fakeValue', + }; + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + defaultHeaders: authorizationTest, + apiKey: 'api-key', + }); + + const { req } = client.buildRequest({ path: '/foo', method: 'post' }); + debug('request', { headers: req.headers }); + expect((req.headers as Headers)['authorization']).toEqual('fakeValue'); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); +}); From 2bc96529a32fbddc8a86c53dbd8bbb93f703e056 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 13 Jan 2025 20:23:42 +0000 Subject: [PATCH 371/533] Revert "chore(internal): streaming refactors (#1261)" This reverts commit dd4af939792583854a313367c5fe2f98eea2f3c8. 
--- src/streaming.ts | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/streaming.ts b/src/streaming.ts index 9cfd18176..2891e6ac3 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -38,7 +38,9 @@ export class Stream implements AsyncIterable { if (sse.data.startsWith('[DONE]')) { done = true; continue; - } else { + } + + if (sse.event === null) { let data; try { @@ -52,12 +54,22 @@ export class Stream implements AsyncIterable { if (data && data.error) { throw new APIError(undefined, data.error, undefined, undefined); } + + yield data; + } else { + let data; + try { + data = JSON.parse(sse.data); + } catch (e) { + console.error(`Could not parse message into JSON:`, sse.data); + console.error(`From chunk:`, sse.raw); + throw e; + } // TODO: Is this where the error should be thrown? if (sse.event == 'error') { throw new APIError(undefined, data.error, data.message, undefined); } - - yield data; + yield { event: sse.event, data: data } as any; } } done = true; From 5df77388f6a8cfc3ac465f77825f01ceb41fa505 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:59:41 +0000 Subject: [PATCH 372/533] chore(types): rename vector store chunking strategy (#1263) --- api.md | 2 +- src/resources/beta/beta.ts | 4 ++-- src/resources/beta/index.ts | 2 +- src/resources/beta/vector-stores/index.ts | 2 +- src/resources/beta/vector-stores/vector-stores.ts | 6 +++--- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api.md b/api.md index a885628a3..33ab95ef6 100644 --- a/api.md +++ b/api.md @@ -283,7 +283,7 @@ Types: - OtherFileChunkingStrategyObject - StaticFileChunkingStrategy - StaticFileChunkingStrategyObject -- StaticFileChunkingStrategyParam +- StaticFileChunkingStrategyObjectParam - VectorStore - VectorStoreDeleted diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index ccd043243..df929b2f7 100644 --- 
a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -48,7 +48,7 @@ import { OtherFileChunkingStrategyObject, StaticFileChunkingStrategy, StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyParam, + StaticFileChunkingStrategyObjectParam, VectorStore, VectorStoreCreateParams, VectorStoreDeleted, @@ -85,7 +85,7 @@ export declare namespace Beta { type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, type StaticFileChunkingStrategy as StaticFileChunkingStrategy, type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, VectorStoresPage as VectorStoresPage, diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index aa2e52d4c..babca0016 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -46,7 +46,7 @@ export { type OtherFileChunkingStrategyObject, type StaticFileChunkingStrategy, type StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyParam, + type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, type VectorStoreCreateParams, diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/beta/vector-stores/index.ts index 89fc0cde0..d587bd160 100644 --- a/src/resources/beta/vector-stores/index.ts +++ b/src/resources/beta/vector-stores/index.ts @@ -23,7 +23,7 @@ export { type OtherFileChunkingStrategyObject, type StaticFileChunkingStrategy, type StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyParam, + type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, type VectorStoreCreateParams, diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index 35ad8c369..cbff2d562 
100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -116,7 +116,7 @@ export type FileChunkingStrategy = StaticFileChunkingStrategyObject | OtherFileC * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. */ -export type FileChunkingStrategyParam = AutoFileChunkingStrategyParam | StaticFileChunkingStrategyParam; +export type FileChunkingStrategyParam = AutoFileChunkingStrategyParam | StaticFileChunkingStrategyObjectParam; /** * This is returned when the chunking strategy is unknown. Typically, this is @@ -154,7 +154,7 @@ export interface StaticFileChunkingStrategyObject { type: 'static'; } -export interface StaticFileChunkingStrategyParam { +export interface StaticFileChunkingStrategyObjectParam { static: StaticFileChunkingStrategy; /** @@ -397,7 +397,7 @@ export declare namespace VectorStores { type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, type StaticFileChunkingStrategy as StaticFileChunkingStrategy, type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, VectorStoresPage as VectorStoresPage, From 66067d37a4189f838f31ed9ca06ee335aef67616 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 13:58:18 +0000 Subject: [PATCH 373/533] chore(types): add `| undefined` to client options properties (#1264) --- src/index.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/index.ts b/src/index.ts index 2320850fb..cf6aa89e3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -137,7 +137,7 @@ export interface ClientOptions { * Note that 
request timeouts are retried by default, so in a worst-case scenario you may wait * much longer than this timeout before the promise succeeds or fails. */ - timeout?: number; + timeout?: number | undefined; /** * An HTTP agent used to manage HTTP(S) connections. @@ -145,7 +145,7 @@ export interface ClientOptions { * If not provided, an agent will be constructed by default in the Node.js environment, * otherwise no agent is used. */ - httpAgent?: Agent; + httpAgent?: Agent | undefined; /** * Specify a custom `fetch` function implementation. @@ -161,7 +161,7 @@ export interface ClientOptions { * * @default 2 */ - maxRetries?: number; + maxRetries?: number | undefined; /** * Default headers to include with every request to the API. @@ -169,7 +169,7 @@ export interface ClientOptions { * These can be removed in individual requests by explicitly setting the * header to `undefined` or `null` in request options. */ - defaultHeaders?: Core.Headers; + defaultHeaders?: Core.Headers | undefined; /** * Default query parameters to include with every request to the API. @@ -177,13 +177,13 @@ export interface ClientOptions { * These can be removed in individual requests by explicitly setting the * param to `undefined` in request options. */ - defaultQuery?: Core.DefaultQuery; + defaultQuery?: Core.DefaultQuery | undefined; /** * By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. * Only set this option to `true` if you understand the risks and have appropriate mitigations in place. 
*/ - dangerouslyAllowBrowser?: boolean; + dangerouslyAllowBrowser?: boolean | undefined; } /** From a796d21f06307419f352da8b9943f6745ff4084f Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 16 Jan 2025 16:33:38 +0000 Subject: [PATCH 374/533] feat(client): add Realtime API support (#1266) --- README.md | 87 ++++++++++++++++++++++++++ examples/package.json | 7 ++- examples/realtime/websocket.ts | 48 +++++++++++++++ examples/realtime/ws.ts | 55 +++++++++++++++++ package.json | 6 ++ src/beta/realtime/index.ts | 1 + src/beta/realtime/internal-base.ts | 83 +++++++++++++++++++++++++ src/beta/realtime/websocket.ts | 97 +++++++++++++++++++++++++++++ src/beta/realtime/ws.ts | 69 +++++++++++++++++++++ src/lib/EventEmitter.ts | 98 ++++++++++++++++++++++++++++++ yarn.lock | 12 ++++ 11 files changed, 560 insertions(+), 3 deletions(-) create mode 100644 examples/realtime/websocket.ts create mode 100644 examples/realtime/ws.ts create mode 100644 src/beta/realtime/index.ts create mode 100644 src/beta/realtime/internal-base.ts create mode 100644 src/beta/realtime/websocket.ts create mode 100644 src/beta/realtime/ws.ts create mode 100644 src/lib/EventEmitter.ts diff --git a/README.md b/README.md index 3039857a1..e7d69a669 100644 --- a/README.md +++ b/README.md @@ -83,6 +83,93 @@ main(); If you need to cancel a stream, you can `break` from the loop or call `stream.controller.abort()`. +## Realtime API beta + +The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. + +The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. 
Server events confirm when audio responses have completed, or when a text response from the model has been received. A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime). + +This SDK supports accessing the Realtime API through the [WebSocket API](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket) or with [ws](https://github.com/websockets/ws). + +Basic text based example with `ws`: + +```ts +// requires `yarn add ws @types/ws` +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; + +const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + +// access the underlying `ws.WebSocket` instance +rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); +}); + +rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; +}); + +rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); +}); + +rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); +rt.on('response.text.done', () => console.log()); + +rt.on('response.done', () => rt.close()); + +rt.socket.on('close', () => console.log('\nConnection closed!')); +``` + +To use the web API `WebSocket` implementation, replace `OpenAIRealtimeWS` with `OpenAIRealtimeWebSocket` and adjust any `rt.socket` access: + +```ts +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; + +const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); +// ... +rt.socket.addEventListener('open', () => { + // ... +}); +``` + +A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/web.ts). + +### Realtime error handling + +When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime/realtime-api-beta#handling-errors), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. + +It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable. 
+ +```ts +const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); +rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; +}); +``` + ### Request & Response types This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: diff --git a/examples/package.json b/examples/package.json index c8a5f7087..b8c34ac45 100644 --- a/examples/package.json +++ b/examples/package.json @@ -6,14 +6,15 @@ "license": "MIT", "private": true, "dependencies": { + "@azure/identity": "^4.2.0", "express": "^4.18.2", "next": "^14.1.1", "openai": "file:..", - "zod-to-json-schema": "^3.21.4", - "@azure/identity": "^4.2.0" + "zod-to-json-schema": "^3.21.4" }, "devDependencies": { "@types/body-parser": "^1.19.3", - "@types/express": "^4.17.19" + "@types/express": "^4.17.19", + "@types/web": "^0.0.194" } } diff --git a/examples/realtime/websocket.ts b/examples/realtime/websocket.ts new file mode 100644 index 000000000..0da131bc3 --- /dev/null +++ b/examples/realtime/websocket.ts @@ -0,0 +1,48 @@ +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; + +async function main() { + const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + + // access the underlying `ws.WebSocket` instance + rt.socket.addEventListener('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts new file mode 100644 index 000000000..4bbe85e5d --- /dev/null +++ b/examples/realtime/ws.ts @@ -0,0 +1,55 @@ +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; + +async function main() { + const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + + // access the underlying `ws.WebSocket` instance + rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['foo'] as any, + model: 'gpt-4o-realtime-preview', + }, + }); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.on('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/package.json b/package.json index ff6ec16bc..77e2d609f 100644 --- a/package.json +++ b/package.json @@ -36,6 +36,7 @@ "@swc/core": "^1.3.102", "@swc/jest": "^0.2.29", "@types/jest": "^29.4.0", + "@types/ws": "^8.5.13", "@typescript-eslint/eslint-plugin": "^6.7.0", "@typescript-eslint/parser": "^6.7.0", "eslint": "^8.49.0", @@ -52,6 +53,7 @@ "tsc-multi": "^1.1.0", "tsconfig-paths": "^4.0.0", "typescript": "^4.8.2", + "ws": "^8.18.0", "zod": "^3.23.8" }, "sideEffects": [ @@ -126,9 +128,13 @@ }, "bin": "./bin/cli", "peerDependencies": { + "ws": "^8.18.0", "zod": "^3.23.8" }, "peerDependenciesMeta": { + "ws": { + "optional": true + }, "zod": { "optional": true } diff --git a/src/beta/realtime/index.ts b/src/beta/realtime/index.ts new file mode 100644 index 000000000..75f0f3088 --- /dev/null +++ b/src/beta/realtime/index.ts @@ -0,0 +1 @@ +export { OpenAIRealtimeError } from './internal-base'; diff --git a/src/beta/realtime/internal-base.ts b/src/beta/realtime/internal-base.ts new file mode 100644 index 000000000..391d69911 --- /dev/null +++ b/src/beta/realtime/internal-base.ts @@ -0,0 +1,83 @@ +import { RealtimeClientEvent, RealtimeServerEvent, ErrorEvent } from '../../resources/beta/realtime/realtime'; +import { EventEmitter } from '../../lib/EventEmitter'; +import { OpenAIError } from '../../error'; + +export class OpenAIRealtimeError 
extends OpenAIError { + /** + * The error data that the API sent back in an `error` event. + */ + error?: ErrorEvent.Error | undefined; + + /** + * The unique ID of the server event. + */ + event_id?: string | undefined; + + constructor(message: string, event: ErrorEvent | null) { + super(message); + + this.error = event?.error; + this.event_id = event?.event_id; + } +} + +type Simplify = { [KeyType in keyof T]: T[KeyType] } & {}; + +type RealtimeEvents = Simplify< + { + event: (event: RealtimeServerEvent) => void; + error: (error: OpenAIRealtimeError) => void; + } & { + [EventType in Exclude]: ( + event: Extract, + ) => unknown; + } +>; + +export abstract class OpenAIRealtimeEmitter extends EventEmitter { + /** + * Send an event to the API. + */ + abstract send(event: RealtimeClientEvent): void; + + /** + * Close the websocket connection. + */ + abstract close(props?: { code: number; reason: string }): void; + + protected _onError(event: null, message: string, cause: any): void; + protected _onError(event: ErrorEvent, message?: string | undefined): void; + protected _onError(event: ErrorEvent | null, message?: string | undefined, cause?: any): void { + message = + event?.error ? + `${event.error.message} code=${event.error.code} param=${event.error.param} type=${event.error.type} event_id=${event.error.event_id}` + : message ?? 'unknown error'; + + if (!this._hasListener('error')) { + const error = new OpenAIRealtimeError( + message + + `\n\nTo resolve these unhandled rejection errors you should bind an \`error\` callback, e.g. 
\`rt.on('error', (error) => ...)\` `, + event, + ); + // @ts-ignore + error.cause = cause; + Promise.reject(error); + return; + } + + const error = new OpenAIRealtimeError(message, event); + // @ts-ignore + error.cause = cause; + + this._emit('error', error); + } +} + +export function buildRealtimeURL(props: { baseURL: string; model: string }): URL { + const path = '/realtime'; + + const url = new URL(props.baseURL + (props.baseURL.endsWith('/') ? path.slice(1) : path)); + url.protocol = 'wss'; + url.searchParams.set('model', props.model); + return url; +} diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts new file mode 100644 index 000000000..e0853779d --- /dev/null +++ b/src/beta/realtime/websocket.ts @@ -0,0 +1,97 @@ +import { OpenAI } from '../../index'; +import { OpenAIError } from '../../error'; +import * as Core from '../../core'; +import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; +import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; + +interface MessageEvent { + data: string; +} + +type _WebSocket = + typeof globalThis extends ( + { + WebSocket: infer ws; + } + ) ? + // @ts-ignore + InstanceType + : any; + +export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { + url: URL; + socket: _WebSocket; + + constructor( + props: { + model: string; + dangerouslyAllowBrowser?: boolean; + }, + client?: Pick, + ) { + super(); + + const dangerouslyAllowBrowser = + props.dangerouslyAllowBrowser ?? + (client as any)?._options?.dangerouslyAllowBrowser ?? + (client?.apiKey.startsWith('ek_') ? 
true : null); + + if (!dangerouslyAllowBrowser && Core.isRunningInBrowser()) { + throw new OpenAIError( + "It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\n\nYou can avoid this error by creating an ephemeral session token:\nhttps://platform.openai.com/docs/api-reference/realtime-sessions\n", + ); + } + + client ??= new OpenAI({ dangerouslyAllowBrowser }); + + this.url = buildRealtimeURL({ baseURL: client.baseURL, model: props.model }); + // @ts-ignore + this.socket = new WebSocket(this.url, [ + 'realtime', + `openai-insecure-api-key.${client.apiKey}`, + 'openai-beta.realtime-v1', + ]); + + this.socket.addEventListener('message', (websocketEvent: MessageEvent) => { + const event = (() => { + try { + return JSON.parse(websocketEvent.data.toString()) as RealtimeServerEvent; + } catch (err) { + this._onError(null, 'could not parse websocket event', err); + return null; + } + })(); + + if (event) { + this._emit('event', event); + + if (event.type === 'error') { + this._onError(event); + } else { + // @ts-expect-error TS isn't smart enough to get the relationship right here + this._emit(event.type, event); + } + } + }); + + this.socket.addEventListener('error', (event: any) => { + this._onError(null, event.message, null); + }); + } + + send(event: RealtimeClientEvent) { + try { + this.socket.send(JSON.stringify(event)); + } catch (err) { + this._onError(null, 'could not send data', err); + } + } + + close(props?: { code: number; reason: string }) { + try { + this.socket.close(props?.code ?? 1000, props?.reason ?? 
'OK'); + } catch (err) { + this._onError(null, 'could not close the connection', err); + } + } +} diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts new file mode 100644 index 000000000..33bb11ad9 --- /dev/null +++ b/src/beta/realtime/ws.ts @@ -0,0 +1,69 @@ +import WS from 'ws'; +import { OpenAI } from '../../index'; +import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; +import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; + +export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { + url: URL; + socket: WS.WebSocket; + + constructor( + props: { model: string; options?: WS.ClientOptions | undefined }, + client?: Pick, + ) { + super(); + client ??= new OpenAI(); + + this.url = buildRealtimeURL({ baseURL: client.baseURL, model: props.model }); + this.socket = new WS.WebSocket(this.url, { + ...props.options, + headers: { + ...props.options?.headers, + Authorization: `Bearer ${client.apiKey}`, + 'OpenAI-Beta': 'realtime=v1', + }, + }); + + this.socket.on('message', (wsEvent) => { + const event = (() => { + try { + return JSON.parse(wsEvent.toString()) as RealtimeServerEvent; + } catch (err) { + this._onError(null, 'could not parse websocket event', err); + return null; + } + })(); + + if (event) { + this._emit('event', event); + + if (event.type === 'error') { + this._onError(event); + } else { + // @ts-expect-error TS isn't smart enough to get the relationship right here + this._emit(event.type, event); + } + } + }); + + this.socket.on('error', (err) => { + this._onError(null, err.message, err); + }); + } + + send(event: RealtimeClientEvent) { + try { + this.socket.send(JSON.stringify(event)); + } catch (err) { + this._onError(null, 'could not send data', err); + } + } + + close(props?: { code: number; reason: string }) { + try { + this.socket.close(props?.code ?? 1000, props?.reason ?? 
'OK'); + } catch (err) { + this._onError(null, 'could not close the connection', err); + } + } +} diff --git a/src/lib/EventEmitter.ts b/src/lib/EventEmitter.ts new file mode 100644 index 000000000..9adeebdc3 --- /dev/null +++ b/src/lib/EventEmitter.ts @@ -0,0 +1,98 @@ +type EventListener = Events[EventType]; + +type EventListeners = Array<{ + listener: EventListener; + once?: boolean; +}>; + +export type EventParameters = { + [Event in EventType]: EventListener extends (...args: infer P) => any ? P : never; +}[EventType]; + +export class EventEmitter any>> { + #listeners: { + [Event in keyof EventTypes]?: EventListeners; + } = {}; + + /** + * Adds the listener function to the end of the listeners array for the event. + * No checks are made to see if the listener has already been added. Multiple calls passing + * the same combination of event and listener will result in the listener being added, and + * called, multiple times. + * @returns this, so that calls can be chained + */ + on(event: Event, listener: EventListener): this { + const listeners: EventListeners = + this.#listeners[event] || (this.#listeners[event] = []); + listeners.push({ listener }); + return this; + } + + /** + * Removes the specified listener from the listener array for the event. + * off() will remove, at most, one instance of a listener from the listener array. If any single + * listener has been added multiple times to the listener array for the specified event, then + * off() must be called multiple times to remove each instance. + * @returns this, so that calls can be chained + */ + off(event: Event, listener: EventListener): this { + const listeners = this.#listeners[event]; + if (!listeners) return this; + const index = listeners.findIndex((l) => l.listener === listener); + if (index >= 0) listeners.splice(index, 1); + return this; + } + + /** + * Adds a one-time listener function for the event. The next time the event is triggered, + * this listener is removed and then invoked. 
+ * @returns this, so that calls can be chained + */ + once(event: Event, listener: EventListener): this { + const listeners: EventListeners = + this.#listeners[event] || (this.#listeners[event] = []); + listeners.push({ listener, once: true }); + return this; + } + + /** + * This is similar to `.once()`, but returns a Promise that resolves the next time + * the event is triggered, instead of calling a listener callback. + * @returns a Promise that resolves the next time given event is triggered, + * or rejects if an error is emitted. (If you request the 'error' event, + * returns a promise that resolves with the error). + * + * Example: + * + * const message = await stream.emitted('message') // rejects if the stream errors + */ + emitted( + event: Event, + ): Promise< + EventParameters extends [infer Param] ? Param + : EventParameters extends [] ? void + : EventParameters + > { + return new Promise((resolve, reject) => { + // TODO: handle errors + this.once(event, resolve as any); + }); + } + + protected _emit( + this: EventEmitter, + event: Event, + ...args: EventParameters + ) { + const listeners: EventListeners | undefined = this.#listeners[event]; + if (listeners) { + this.#listeners[event] = listeners.filter((l) => !l.once) as any; + listeners.forEach(({ listener }: any) => listener(...(args as any))); + } + } + + protected _hasListener(event: keyof EventTypes): boolean { + const listeners = this.#listeners[event]; + return listeners && listeners.length > 0; + } +} diff --git a/yarn.lock b/yarn.lock index c0220f984..0a4307f70 100644 --- a/yarn.lock +++ b/yarn.lock @@ -881,6 +881,13 @@ resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.3.tgz#6209321eb2c1712a7e7466422b8cb1fc0d9dd5d8" integrity sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw== +"@types/ws@^8.5.13": + version "8.5.13" + resolved 
"https://registry.yarnpkg.com/@types/ws/-/ws-8.5.13.tgz#6414c280875e2691d0d1e080b05addbf5cb91e20" + integrity sha512-osM/gWBTPKgHV8XkTunnegTRIsvF6owmf5w+JtAfOw472dptdm0dlGv4xCt6GwQRcC2XVOvvRE/0bAoQcL2QkA== + dependencies: + "@types/node" "*" + "@types/yargs-parser@*": version "21.0.3" resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.3.tgz#815e30b786d2e8f0dcd85fd5bcf5e1a04d008f15" @@ -3472,6 +3479,11 @@ write-file-atomic@^4.0.2: imurmurhash "^0.1.4" signal-exit "^3.0.7" +ws@^8.18.0: + version "8.18.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.0.tgz#0d7505a6eafe2b0e712d232b42279f53bc289bbc" + integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw== + y18n@^5.0.5: version "5.0.8" resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" From 9d214eac82509028787b6ad148fec46689af74d3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 05:06:39 +0000 Subject: [PATCH 375/533] release: 4.79.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 21 +++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3218ab333..a4062b378 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.78.1" + ".": "4.79.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 320d00140..c2021f78a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 4.79.0 (2025-01-17) + +Full Changelog: [v4.78.1...v4.79.0](https://github.com/openai/openai-node/compare/v4.78.1...v4.79.0) + +### Features + +* **client:** add Realtime API support ([#1266](https://github.com/openai/openai-node/issues/1266)) 
([7160ebe](https://github.com/openai/openai-node/commit/7160ebe647769fbf48a600c9961d1a6f86dc9622)) + + +### Bug Fixes + +* **logs/azure:** redact sensitive header when DEBUG is set ([#1218](https://github.com/openai/openai-node/issues/1218)) ([6a72fd7](https://github.com/openai/openai-node/commit/6a72fd736733db19504a829bf203b39d5b9e3644)) + + +### Chores + +* fix streaming ([379c743](https://github.com/openai/openai-node/commit/379c7435ed5d508458e9cdc22386039b84fcec5e)) +* **internal:** streaming refactors ([#1261](https://github.com/openai/openai-node/issues/1261)) ([dd4af93](https://github.com/openai/openai-node/commit/dd4af939792583854a313367c5fe2f98eea2f3c8)) +* **types:** add `| undefined` to client options properties ([#1264](https://github.com/openai/openai-node/issues/1264)) ([5e56979](https://github.com/openai/openai-node/commit/5e569799b9ac8f915b16de90d91d38b568c1edce)) +* **types:** rename vector store chunking strategy ([#1263](https://github.com/openai/openai-node/issues/1263)) ([d31acee](https://github.com/openai/openai-node/commit/d31acee860c80ba945d4e70b956c7ed75f5f849a)) + ## 4.78.1 (2025-01-10) Full Changelog: [v4.78.0...v4.78.1](https://github.com/openai/openai-node/compare/v4.78.0...v4.78.1) diff --git a/jsr.json b/jsr.json index 257faa02d..ac02a7435 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.78.1", + "version": "4.79.0", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 77e2d609f..3b01be9fe 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.78.1", + "version": "4.79.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index a8ac58ba2..afc5d7104 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.78.1'; // x-release-please-version +export const VERSION 
= '4.79.0'; // x-release-please-version From 6cd83178324271763c3b3ba236ea5406c1447dd4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 17 Jan 2025 19:39:08 +0000 Subject: [PATCH 376/533] fix(realtime): correct import syntax (#1267) --- src/beta/realtime/ws.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts index 33bb11ad9..631a36cd2 100644 --- a/src/beta/realtime/ws.ts +++ b/src/beta/realtime/ws.ts @@ -1,4 +1,4 @@ -import WS from 'ws'; +import * as WS from 'ws'; import { OpenAI } from '../../index'; import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; From 8383975a2e45aa222fcf56a45b38834bcf8b31c7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 19:39:37 +0000 Subject: [PATCH 377/533] release: 4.79.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a4062b378..8d95306a8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.0" + ".": "4.79.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c2021f78a..d24eeffa5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.79.1 (2025-01-17) + +Full Changelog: [v4.79.0...v4.79.1](https://github.com/openai/openai-node/compare/v4.79.0...v4.79.1) + +### Bug Fixes + +* **realtime:** correct import syntax ([#1267](https://github.com/openai/openai-node/issues/1267)) ([74702a7](https://github.com/openai/openai-node/commit/74702a739f566810d2b6c4e0832cfa17a1d1e272)) + ## 4.79.0 (2025-01-17) Full Changelog: 
[v4.78.1...v4.79.0](https://github.com/openai/openai-node/compare/v4.78.1...v4.79.0) diff --git a/jsr.json b/jsr.json index ac02a7435..9f4dbe4b6 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.0", + "version": "4.79.1", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 3b01be9fe..2984cf2d8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.79.0", + "version": "4.79.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index afc5d7104..587a3c245 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.0'; // x-release-please-version +export const VERSION = '4.79.1'; // x-release-please-version From 6f3ad43ac1bbb8f8f6c8fae9e83398d85cead56c Mon Sep 17 00:00:00 2001 From: Kevin Whinnery Date: Fri, 17 Jan 2025 15:12:04 -0600 Subject: [PATCH 378/533] Create export for WebSocket on Deno/JSR --- jsr.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/jsr.json b/jsr.json index 9f4dbe4b6..72575a407 100644 --- a/jsr.json +++ b/jsr.json @@ -1,7 +1,10 @@ { "name": "@openai/openai", "version": "4.79.1", - "exports": "./index.ts", + "exports": { + ".": "./index.ts", + "./beta/realtime/websocket": "./beta/realtime/websocket.ts" + }, "publish": { "exclude": [ "!." 
From 4640dc608f7f55624656007207c49feb5f3047e3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 17:59:02 +0000 Subject: [PATCH 379/533] chore(internal): add test (#1270) --- tests/index.test.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/index.test.ts b/tests/index.test.ts index 016d525f5..6227d6fbe 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -96,6 +96,15 @@ describe('instantiate client', () => { expect(response).toEqual({ url: 'http://localhost:5000/foo', custom: true }); }); + test('explicit global fetch', async () => { + // make sure the global fetch type is assignable to our Fetch type + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + apiKey: 'My API Key', + fetch: defaultFetch, + }); + }); + test('custom signal', async () => { const client = new OpenAI({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', From 95886b57b1373c16e12a0ee1288d68cd8520695d Mon Sep 17 00:00:00 2001 From: Ali Tabesh Date: Tue, 21 Jan 2025 13:19:22 +0330 Subject: [PATCH 380/533] docs(readme): fix Realtime API example link (#1272) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e7d69a669..3bd386e99 100644 --- a/README.md +++ b/README.md @@ -153,7 +153,7 @@ rt.socket.addEventListener('open', () => { }); ``` -A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/web.ts). +A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/websocket.ts). 
### Realtime error handling From 53149de69e19836568c1f1083ee7ee3c07123d1a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 09:49:52 +0000 Subject: [PATCH 381/533] release: 4.79.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8d95306a8..06a612d67 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.1" + ".": "4.79.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d24eeffa5..9151619f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.79.2 (2025-01-21) + +Full Changelog: [v4.79.1...v4.79.2](https://github.com/openai/openai-node/compare/v4.79.1...v4.79.2) + +### Chores + +* **internal:** add test ([#1270](https://github.com/openai/openai-node/issues/1270)) ([b7c2d3d](https://github.com/openai/openai-node/commit/b7c2d3d9abd315f1452a578b0fd0d82e6ac4ff60)) + + +### Documentation + +* **readme:** fix Realtime API example link ([#1272](https://github.com/openai/openai-node/issues/1272)) ([d0653c7](https://github.com/openai/openai-node/commit/d0653c7fef48360d137a7411dfdfb95d477cdbc5)) + ## 4.79.1 (2025-01-17) Full Changelog: [v4.79.0...v4.79.1](https://github.com/openai/openai-node/compare/v4.79.0...v4.79.1) diff --git a/jsr.json b/jsr.json index 9f4dbe4b6..ce967d67a 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.1", + "version": "4.79.2", "exports": "./index.ts", "publish": { "exclude": [ diff --git a/package.json b/package.json index 2984cf2d8..07b2da77d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.79.1", + "version": "4.79.2", "description": "The official TypeScript library for the OpenAI 
API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 587a3c245..2cedb894b 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.1'; // x-release-please-version +export const VERSION = '4.79.2'; // x-release-please-version From e5e682f11783b14323f03ff9bf3298b8c6868136 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 21 Jan 2025 15:35:13 +0000 Subject: [PATCH 382/533] fix(jsr): export zod helpers --- jsr.json | 1 + 1 file changed, 1 insertion(+) diff --git a/jsr.json b/jsr.json index 35ee4e7ea..5819f2fa3 100644 --- a/jsr.json +++ b/jsr.json @@ -3,6 +3,7 @@ "version": "4.79.2", "exports": { ".": "./index.ts", + "./helpers/zod": "./helpers/zod.ts", "./beta/realtime/websocket": "./beta/realtime/websocket.ts" }, "publish": { From f5139d4aa281bd9a20b8cf5c801843f4d6c4bb3b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 15:53:54 +0000 Subject: [PATCH 383/533] release: 4.79.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 06a612d67..cdd63a113 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.2" + ".": "4.79.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9151619f9..8a1ce156f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.79.3 (2025-01-21) + +Full Changelog: [v4.79.2...v4.79.3](https://github.com/openai/openai-node/compare/v4.79.2...v4.79.3) + +### Bug Fixes + +* **jsr:** export zod helpers ([9dc55b6](https://github.com/openai/openai-node/commit/9dc55b62b564ad5ad1d4a60fe520b68235d05296)) + ## 4.79.2 (2025-01-21) Full Changelog: 
[v4.79.1...v4.79.2](https://github.com/openai/openai-node/compare/v4.79.1...v4.79.2) diff --git a/jsr.json b/jsr.json index 5819f2fa3..c070e4983 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.2", + "version": "4.79.3", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 07b2da77d..342f7c539 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.79.2", + "version": "4.79.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 2cedb894b..c2097ae42 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.2'; // x-release-please-version +export const VERSION = '4.79.3'; // x-release-please-version From a1d0ddc3b27b15700e355a476e8d183dae43987c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 17:22:40 +0000 Subject: [PATCH 384/533] docs: update deprecation messages (#1275) --- src/resources/chat/completions.ts | 24 ++++++++++++------------ src/resources/files.ts | 4 ++-- src/resources/fine-tuning/jobs/jobs.ts | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 31f5814cb..88c778036 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -163,8 +163,8 @@ export interface ChatCompletionAssistantMessageParam { content?: string | Array | null; /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. 
*/ function_call?: ChatCompletionAssistantMessageParam.FunctionCall | null; @@ -198,8 +198,8 @@ export namespace ChatCompletionAssistantMessageParam { } /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ export interface FunctionCall { /** @@ -360,8 +360,8 @@ export namespace ChatCompletionChunk { content?: string | null; /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ function_call?: Delta.FunctionCall; @@ -380,8 +380,8 @@ export namespace ChatCompletionChunk { export namespace Delta { /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ export interface FunctionCall { /** @@ -620,8 +620,8 @@ export interface ChatCompletionMessage { audio?: ChatCompletionAudio | null; /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ function_call?: ChatCompletionMessage.FunctionCall | null; @@ -633,8 +633,8 @@ export interface ChatCompletionMessage { export namespace ChatCompletionMessage { /** - * @deprecated: Deprecated and replaced by `tool_calls`. 
The name and arguments of - * a function that should be called, as generated by the model. + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. */ export interface FunctionCall { /** diff --git a/src/resources/files.ts b/src/resources/files.ts index 43708310b..67bc95469 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -168,13 +168,13 @@ export interface FileObject { | 'vision'; /** - * @deprecated: Deprecated. The current status of the file, which can be either + * @deprecated Deprecated. The current status of the file, which can be either * `uploaded`, `processed`, or `error`. */ status: 'uploaded' | 'processed' | 'error'; /** - * @deprecated: Deprecated. For details on why a fine-tuning training file failed + * @deprecated Deprecated. For details on why a fine-tuning training file failed * validation, see the `error` field on `fine_tuning.job`. */ status_details?: string; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 44dd011aa..9be03c302 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -516,7 +516,7 @@ export interface JobCreateParams { export namespace JobCreateParams { /** - * @deprecated: The hyperparameters used for the fine-tuning job. This value is now + * @deprecated The hyperparameters used for the fine-tuning job. This value is now * deprecated in favor of `method`, and should be passed in under the `method` * parameter. 
*/ From c85dc9793ab6fb318b9ece1a557c4e00024265c1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 21:00:49 +0000 Subject: [PATCH 385/533] chore(internal): minor restructuring (#1278) --- src/internal/decoders/line.ts | 2 +- src/internal/stream-utils.ts | 32 +++++++++++++++++++++++++++++ src/streaming.ts | 38 +++-------------------------------- 3 files changed, 36 insertions(+), 36 deletions(-) create mode 100644 src/internal/stream-utils.ts diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts index 1e0bbf390..34e41d1dc 100644 --- a/src/internal/decoders/line.ts +++ b/src/internal/decoders/line.ts @@ -1,6 +1,6 @@ import { OpenAIError } from '../../error'; -type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; +export type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; /** * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally diff --git a/src/internal/stream-utils.ts b/src/internal/stream-utils.ts new file mode 100644 index 000000000..37f7793cf --- /dev/null +++ b/src/internal/stream-utils.ts @@ -0,0 +1,32 @@ +/** + * Most browsers don't yet have async iterable support for ReadableStream, + * and Node has a very different way of reading bytes from its "ReadableStream". 
+ * + * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 + */ +export function ReadableStreamToAsyncIterable(stream: any): AsyncIterableIterator { + if (stream[Symbol.asyncIterator]) return stream; + + const reader = stream.getReader(); + return { + async next() { + try { + const result = await reader.read(); + if (result?.done) reader.releaseLock(); // release lock when stream becomes closed + return result; + } catch (e) { + reader.releaseLock(); // release lock when stream becomes errored + throw e; + } + }, + async return() { + const cancelPromise = reader.cancel(); + reader.releaseLock(); + await cancelPromise; + return { done: true, value: undefined }; + }, + [Symbol.asyncIterator]() { + return this; + }, + }; +} diff --git a/src/streaming.ts b/src/streaming.ts index 2891e6ac3..6a57a50a0 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -1,6 +1,7 @@ import { ReadableStream, type Response } from './_shims/index'; import { OpenAIError } from './error'; import { LineDecoder } from './internal/decoders/line'; +import { ReadableStreamToAsyncIterable } from './internal/stream-utils'; import { APIError } from './error'; @@ -96,7 +97,7 @@ export class Stream implements AsyncIterable { async function* iterLines(): AsyncGenerator { const lineDecoder = new LineDecoder(); - const iter = readableStreamAsyncIterable(readableStream); + const iter = ReadableStreamToAsyncIterable(readableStream); for await (const chunk of iter) { for (const line of lineDecoder.decode(chunk)) { yield line; @@ -210,7 +211,7 @@ export async function* _iterSSEMessages( const sseDecoder = new SSEDecoder(); const lineDecoder = new LineDecoder(); - const iter = readableStreamAsyncIterable(response.body); + const iter = ReadableStreamToAsyncIterable(response.body); for await (const sseChunk of iterSSEChunks(iter)) { for (const line of lineDecoder.decode(sseChunk)) { const sse = sseDecoder.decode(line); @@ -363,36 +364,3 @@ 
function partition(str: string, delimiter: string): [string, string, string] { return [str, '', '']; } - -/** - * Most browsers don't yet have async iterable support for ReadableStream, - * and Node has a very different way of reading bytes from its "ReadableStream". - * - * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 - */ -export function readableStreamAsyncIterable(stream: any): AsyncIterableIterator { - if (stream[Symbol.asyncIterator]) return stream; - - const reader = stream.getReader(); - return { - async next() { - try { - const result = await reader.read(); - if (result?.done) reader.releaseLock(); // release lock when stream becomes closed - return result; - } catch (e) { - reader.releaseLock(); // release lock when stream becomes errored - throw e; - } - }, - async return() { - const cancelPromise = reader.cancel(); - reader.releaseLock(); - await cancelPromise; - return { done: true, value: undefined }; - }, - [Symbol.asyncIterator]() { - return this; - }, - }; -} From e5aba740d98541e9ca7cb01998c27033c0f03c5f Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 21 Jan 2025 22:36:52 +0000 Subject: [PATCH 386/533] fix(jsr): correct zod config --- jsr.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/jsr.json b/jsr.json index c070e4983..8c24896f7 100644 --- a/jsr.json +++ b/jsr.json @@ -6,6 +6,9 @@ "./helpers/zod": "./helpers/zod.ts", "./beta/realtime/websocket": "./beta/realtime/websocket.ts" }, + "imports": { + "zod": "npm:zod@3" + }, "publish": { "exclude": [ "!." 
From 0fae08b33e6963c6b46e6318f23bada01d18f19f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 22:37:23 +0000 Subject: [PATCH 387/533] release: 4.79.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index cdd63a113..b1ab5c7b9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.3" + ".": "4.79.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a1ce156f..4254a9b8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 4.79.4 (2025-01-21) + +Full Changelog: [v4.79.3...v4.79.4](https://github.com/openai/openai-node/compare/v4.79.3...v4.79.4) + +### Bug Fixes + +* **jsr:** correct zod config ([e45fa5f](https://github.com/openai/openai-node/commit/e45fa5f535ca74789636001e60e33edcad4db83c)) + + +### Chores + +* **internal:** minor restructuring ([#1278](https://github.com/openai/openai-node/issues/1278)) ([58ea92a](https://github.com/openai/openai-node/commit/58ea92a7464a04223f24ba31dbc0f7d0cf99cc19)) + + +### Documentation + +* update deprecation messages ([#1275](https://github.com/openai/openai-node/issues/1275)) ([1c6599e](https://github.com/openai/openai-node/commit/1c6599e47ef75a71cb309a1e14d97bc97bd036d0)) + ## 4.79.3 (2025-01-21) Full Changelog: [v4.79.2...v4.79.3](https://github.com/openai/openai-node/compare/v4.79.2...v4.79.3) diff --git a/jsr.json b/jsr.json index 8c24896f7..e6d772116 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.3", + "version": "4.79.4", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 342f7c539..d7a5555e5 100644 --- a/package.json +++ b/package.json @@ 
-1,6 +1,6 @@ { "name": "openai", - "version": "4.79.3", + "version": "4.79.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c2097ae42..e8b9601ed 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.3'; // x-release-please-version +export const VERSION = '4.79.4'; // x-release-please-version From 74776c6923b36b8b610063e0f5d8773bbd94313f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:20:44 +0000 Subject: [PATCH 388/533] feat(api): update enum values, comments, and examples (#1280) --- .stats.yml | 2 +- src/resources/audio/speech.ts | 8 ++--- src/resources/beta/realtime/realtime.ts | 32 +++++++++++-------- src/resources/beta/realtime/sessions.ts | 30 ++++++++++------- src/resources/chat/completions.ts | 9 ++---- src/resources/embeddings.ts | 3 +- .../beta/realtime/sessions.test.ts | 27 ++-------------- tests/api-resources/chat/completions.test.ts | 2 +- tests/api-resources/completions.test.ts | 2 +- 9 files changed, 49 insertions(+), 66 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9600edae3..d518bac58 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index bd2ed9f65..35e82c4c1 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -33,12 +33,12 @@ export interface SpeechCreateParams { model: (string & {}) | SpeechModel; /** - * The voice to use when generating the audio. 
Supported voices are `alloy`, - * `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are - * available in the + * The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + * `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + * voices are available in the * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). */ - voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; + voice: 'alloy' | 'ash' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer'; /** * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 5de06917a..0fb66eb49 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -173,9 +173,10 @@ export interface ConversationItemCreateEvent { /** * The ID of the preceding item after which the new item will be inserted. If not - * set, the new item will be appended to the end of the conversation. If set, it - * allows an item to be inserted mid-conversation. If the ID cannot be found, an - * error will be returned and the item will not be added. + * set, the new item will be appended to the end of the conversation. If set to + * `root`, the new item will be added to the beginning of the conversation. If set + * to an existing ID, it allows an item to be inserted mid-conversation. If the ID + * cannot be found, an error will be returned and the item will not be added. */ previous_item_id?: string; } @@ -1705,17 +1706,9 @@ export namespace SessionUpdateEvent { */ export interface Session { /** - * The Realtime model used for this session. 
- */ - model: - | 'gpt-4o-realtime-preview' - | 'gpt-4o-realtime-preview-2024-10-01' - | 'gpt-4o-realtime-preview-2024-12-17' - | 'gpt-4o-mini-realtime-preview' - | 'gpt-4o-mini-realtime-preview-2024-12-17'; - - /** - * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; @@ -1756,8 +1749,19 @@ export namespace SessionUpdateEvent { */ modalities?: Array<'text' | 'audio'>; + /** + * The Realtime model used for this session. + */ + model?: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + /** * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * For `pcm16`, output audio is sampled at a rate of 24kHz. */ output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index c1082d236..68c48db59 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -32,7 +32,9 @@ export interface Session { id?: string; /** - * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; @@ -86,6 +88,7 @@ export interface Session { /** * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * For `pcm16`, output audio is sampled at a rate of 24kHz. 
*/ output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; @@ -372,17 +375,9 @@ export namespace SessionCreateResponse { export interface SessionCreateParams { /** - * The Realtime model used for this session. - */ - model: - | 'gpt-4o-realtime-preview' - | 'gpt-4o-realtime-preview-2024-10-01' - | 'gpt-4o-realtime-preview-2024-12-17' - | 'gpt-4o-mini-realtime-preview' - | 'gpt-4o-mini-realtime-preview-2024-12-17'; - - /** - * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; @@ -423,8 +418,19 @@ export interface SessionCreateParams { */ modalities?: Array<'text' | 'audio'>; + /** + * The Realtime model used for this session. + */ + model?: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + /** * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * For `pcm16`, output audio is sampled at a rate of 24kHz. */ output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 88c778036..683eb5ed4 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -76,8 +76,7 @@ export interface ChatCompletion { object: 'chat.completion'; /** - * The service tier used for processing the request. This field is only included if - * the `service_tier` parameter is specified in the request. + * The service tier used for processing the request. 
*/ service_tier?: 'scale' | 'default' | null; @@ -300,8 +299,7 @@ export interface ChatCompletionChunk { object: 'chat.completion.chunk'; /** - * The service tier used for processing the request. This field is only included if - * the `service_tier` parameter is specified in the request. + * The service tier used for processing the request. */ service_tier?: 'scale' | 'default' | null; @@ -1115,9 +1113,6 @@ export interface ChatCompletionCreateParamsBase { * - If set to 'default', the request will be processed using the default service * tier with a lower uptime SLA and no latency guarentee. * - When not set, the default behavior is 'auto'. - * - * When this parameter is set, the response body will include the `service_tier` - * utilized. */ service_tier?: 'auto' | 'default' | null; diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 4b1644a68..d01ffc807 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -86,7 +86,8 @@ export interface EmbeddingCreateParams { * `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 * dimensions or less. * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - * for counting tokens. + * for counting tokens. Some models may also impose a limit on total number of + * tokens summed across inputs. 
*/ input: string | Array | Array | Array>; diff --git a/tests/api-resources/beta/realtime/sessions.test.ts b/tests/api-resources/beta/realtime/sessions.test.ts index 0ed998c27..dbb92ead3 100644 --- a/tests/api-resources/beta/realtime/sessions.test.ts +++ b/tests/api-resources/beta/realtime/sessions.test.ts @@ -9,8 +9,8 @@ const client = new OpenAI({ }); describe('resource sessions', () => { - test('create: only required params', async () => { - const responsePromise = client.beta.realtime.sessions.create({ model: 'gpt-4o-realtime-preview' }); + test('create', async () => { + const responsePromise = client.beta.realtime.sessions.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -19,27 +19,4 @@ describe('resource sessions', () => { expect(dataAndResponse.data).toBe(response); expect(dataAndResponse.response).toBe(rawResponse); }); - - test('create: required and optional params', async () => { - const response = await client.beta.realtime.sessions.create({ - model: 'gpt-4o-realtime-preview', - input_audio_format: 'pcm16', - input_audio_transcription: { model: 'model' }, - instructions: 'instructions', - max_response_output_tokens: 0, - modalities: ['text'], - output_audio_format: 'pcm16', - temperature: 0, - tool_choice: 'tool_choice', - tools: [{ description: 'description', name: 'name', parameters: {}, type: 'function' }], - turn_detection: { - create_response: true, - prefix_padding_ms: 0, - silence_duration_ms: 0, - threshold: 0, - type: 'type', - }, - voice: 'alloy', - }); - }); }); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts index dfc09f69b..8f1bc7d4c 100644 --- a/tests/api-resources/chat/completions.test.ts +++ b/tests/api-resources/chat/completions.test.ts @@ -43,7 +43,7 @@ describe('resource completions', () => { presence_penalty: -2, reasoning_effort: 'low', response_format: { type: 'text' }, 
- seed: -9007199254740991, + seed: 0, service_tier: 'auto', stop: 'string', store: true, diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 82322dc3a..c98501a87 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -32,7 +32,7 @@ describe('resource completions', () => { max_tokens: 16, n: 1, presence_penalty: -2, - seed: -9007199254740991, + seed: 0, stop: '\n', stream: false, stream_options: { include_usage: true }, From 180b9ca1b5472d7697202a9220960a948bfbb9c8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:21:16 +0000 Subject: [PATCH 389/533] release: 4.80.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b1ab5c7b9..a21d67d78 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.79.4" + ".": "4.80.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4254a9b8f..9126bf6a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.80.0 (2025-01-22) + +Full Changelog: [v4.79.4...v4.80.0](https://github.com/openai/openai-node/compare/v4.79.4...v4.80.0) + +### Features + +* **api:** update enum values, comments, and examples ([#1280](https://github.com/openai/openai-node/issues/1280)) ([d38f2c2](https://github.com/openai/openai-node/commit/d38f2c2648b6990f217c3c7d83ca31f3739641d3)) + ## 4.79.4 (2025-01-21) Full Changelog: [v4.79.3...v4.79.4](https://github.com/openai/openai-node/compare/v4.79.3...v4.79.4) diff --git a/jsr.json b/jsr.json index e6d772116..d79b07c2f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.79.4", + "version": "4.80.0", "exports": { ".": 
"./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index d7a5555e5..fd85ffdd0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.79.4", + "version": "4.80.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e8b9601ed..c9b6787c2 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.79.4'; // x-release-please-version +export const VERSION = '4.80.0'; // x-release-please-version From b7ab6bb304973ade94830f37eb646e800226d5ef Mon Sep 17 00:00:00 2001 From: hi019 <65871571+hi019@users.noreply.github.com> Date: Wed, 22 Jan 2025 12:57:18 -0800 Subject: [PATCH 390/533] docs: fix typo, "zodFunctionTool" -> "zodFunction" (#1128) --- helpers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers.md b/helpers.md index abf980c82..aa3775a54 100644 --- a/helpers.md +++ b/helpers.md @@ -49,7 +49,7 @@ if (message?.parsed) { The `.parse()` method will also automatically parse `function` tool calls if: -- You use the `zodFunctionTool()` helper method +- You use the `zodFunction()` helper method - You mark your tool schema with `"strict": True` For example: From 9bfb778d547c34a6b7ed4168251786b1d6723985 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 22 Jan 2025 20:34:34 +0000 Subject: [PATCH 391/533] fix(azure): include retry count header --- src/index.ts | 7 +++++-- tests/lib/azure.test.ts | 12 ++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/index.ts b/src/index.ts index cf6aa89e3..944def00f 100644 --- a/src/index.ts +++ b/src/index.ts @@ -577,7 +577,10 @@ export class AzureOpenAI extends OpenAI { this._deployment = deployment; } - override buildRequest(options: Core.FinalRequestOptions): { + override buildRequest( + options: Core.FinalRequestOptions, + props: { retryCount?: number } = 
{}, + ): { req: RequestInit; url: string; timeout: number; @@ -591,7 +594,7 @@ export class AzureOpenAI extends OpenAI { options.path = `/deployments/${model}${options.path}`; } } - return super.buildRequest(options); + return super.buildRequest(options, props); } private async _getAzureADToken(): Promise { diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index 064a0098c..0e3c2c5a3 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -51,6 +51,18 @@ describe('instantiate azure client', () => { }); expect(req.headers as Headers).not.toHaveProperty('x-my-default-header'); }); + + test('includes retry count', () => { + const { req } = client.buildRequest( + { + path: '/foo', + method: 'post', + headers: { 'X-My-Default-Header': null }, + }, + { retryCount: 1 }, + ); + expect((req.headers as Headers)['x-stainless-retry-count']).toEqual('1'); + }); }); describe('defaultQuery', () => { From 654a2ac33d6b0bab723ec30ab734bbd9b693bbf3 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 22 Jan 2025 20:53:36 +0000 Subject: [PATCH 392/533] docs(helpers): fix type annotation --- helpers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers.md b/helpers.md index aa3775a54..16bc1f277 100644 --- a/helpers.md +++ b/helpers.md @@ -226,7 +226,7 @@ on in the documentation page [Message](https://platform.openai.com/docs/api-refe ```ts .on('textCreated', (content: Text) => ...) -.on('textDelta', (delta: RunStepDelta, snapshot: Text) => ...) +.on('textDelta', (delta: TextDelta, snapshot: Text) => ...) .on('textDone', (content: Text, snapshot: Message) => ...) 
``` From 3fcded9eb387e39bdf03a06b701710cf3075f990 Mon Sep 17 00:00:00 2001 From: Guspan Tanadi <36249910+guspan-tanadi@users.noreply.github.com> Date: Fri, 24 Jan 2025 19:16:27 +0700 Subject: [PATCH 393/533] docs(readme): fix realtime errors docs link (#1286) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3bd386e99..012511412 100644 --- a/README.md +++ b/README.md @@ -157,7 +157,7 @@ A full example can be found [here](https://github.com/openai/openai-node/blob/ma ### Realtime error handling -When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime/realtime-api-beta#handling-errors), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. +When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable. 
From fb61fc2db45d2fb1f25016b70608714a93a80c9d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 24 Jan 2025 12:16:56 +0000 Subject: [PATCH 394/533] release: 4.80.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a21d67d78..d140407b9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.80.0" + ".": "4.80.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9126bf6a2..e4d4d73b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.80.1 (2025-01-24) + +Full Changelog: [v4.80.0...v4.80.1](https://github.com/openai/openai-node/compare/v4.80.0...v4.80.1) + +### Bug Fixes + +* **azure:** include retry count header ([3e0ba40](https://github.com/openai/openai-node/commit/3e0ba409e57ce276fb1f95cd11c801e4ccaad572)) + + +### Documentation + +* fix typo, "zodFunctionTool" -> "zodFunction" ([#1128](https://github.com/openai/openai-node/issues/1128)) ([b7ab6bb](https://github.com/openai/openai-node/commit/b7ab6bb304973ade94830f37eb646e800226d5ef)) +* **helpers:** fix type annotation ([fc019df](https://github.com/openai/openai-node/commit/fc019df1d9cc276e8f8e689742853a09aa94991a)) +* **readme:** fix realtime errors docs link ([#1286](https://github.com/openai/openai-node/issues/1286)) ([d1d50c8](https://github.com/openai/openai-node/commit/d1d50c897c18cefea964e8057fe1acfd766ae2bf)) + ## 4.80.0 (2025-01-22) Full Changelog: [v4.79.4...v4.80.0](https://github.com/openai/openai-node/compare/v4.79.4...v4.80.0) diff --git a/jsr.json b/jsr.json index d79b07c2f..e2ecad87f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.80.0", + "version": "4.80.1", "exports": { ".": "./index.ts", 
"./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index fd85ffdd0..497c7fae9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.80.0", + "version": "4.80.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c9b6787c2..7d762daed 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.80.0'; // x-release-please-version +export const VERSION = '4.80.1'; // x-release-please-version From b4bb01ddd9f1c1f6ae41ddc11a9e1b707ef04764 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Wed, 29 Jan 2025 09:45:25 -0600 Subject: [PATCH 395/533] feat(azure): Realtime API support (#1287) --- README.md | 22 ++++++++- examples/{azure.ts => azure/chat.ts} | 3 +- examples/azure/realtime/websocket.ts | 60 +++++++++++++++++++++++++ examples/azure/realtime/ws.ts | 67 ++++++++++++++++++++++++++++ examples/package.json | 1 + examples/realtime/ws.ts | 2 +- src/beta/realtime/internal-base.ts | 18 ++++++-- src/beta/realtime/websocket.ts | 54 ++++++++++++++++++++-- src/beta/realtime/ws.ts | 35 +++++++++++++-- src/index.ts | 8 ++-- 10 files changed, 251 insertions(+), 19 deletions(-) rename examples/{azure.ts => azure/chat.ts} (91%) create mode 100644 examples/azure/realtime/websocket.ts create mode 100644 examples/azure/realtime/ws.ts diff --git a/README.md b/README.md index 012511412..a1f4bf760 100644 --- a/README.md +++ b/README.md @@ -499,7 +499,7 @@ const credential = new DefaultAzureCredential(); const scope = 'https://cognitiveservices.azure.com/.default'; const azureADTokenProvider = getBearerTokenProvider(credential, scope); -const openai = new AzureOpenAI({ azureADTokenProvider }); +const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" }); const result = await openai.chat.completions.create({ model: 'gpt-4o', @@ -509,6 +509,26 @@ 
const result = await openai.chat.completions.create({ console.log(result.choices[0]!.message?.content); ``` +### Realtime API +This SDK provides real-time streaming capabilities for Azure OpenAI through the `OpenAIRealtimeWS` and `OpenAIRealtimeWebSocket` clients described previously. + +To utilize the real-time features, begin by creating a fully configured `AzureOpenAI` client and passing it into either `OpenAIRealtimeWS.azure` or `OpenAIRealtimeWebSocket.azure`. For example: + +```ts +const cred = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const deploymentName = 'gpt-4o-realtime-preview-1001'; +const azureADTokenProvider = getBearerTokenProvider(cred, scope); +const client = new AzureOpenAI({ + azureADTokenProvider, + apiVersion: '2024-10-01-preview', + deployment: deploymentName, +}); +const rt = await OpenAIRealtimeWS.azure(client); +``` + +Once the instance has been created, you can then begin sending requests and receiving streaming responses in real time. + ### Retries Certain errors will be automatically retried 2 times by default, with a short exponential backoff. diff --git a/examples/azure.ts b/examples/azure/chat.ts similarity index 91% rename from examples/azure.ts rename to examples/azure/chat.ts index 5fe1718fa..46df820f8 100755 --- a/examples/azure.ts +++ b/examples/azure/chat.ts @@ -2,6 +2,7 @@ import { AzureOpenAI } from 'openai'; import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; +import 'dotenv/config'; // Corresponds to your Model deployment within your OpenAI resource, e.g. gpt-4-1106-preview // Navigate to the Azure OpenAI Studio to deploy a model. @@ -13,7 +14,7 @@ const azureADTokenProvider = getBearerTokenProvider(credential, scope); // Make sure to set AZURE_OPENAI_ENDPOINT with the endpoint of your Azure resource. // You can find it in the Azure Portal. 
-const openai = new AzureOpenAI({ azureADTokenProvider }); +const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: '2024-10-01-preview' }); async function main() { console.log('Non-streaming:'); diff --git a/examples/azure/realtime/websocket.ts b/examples/azure/realtime/websocket.ts new file mode 100644 index 000000000..bec74e654 --- /dev/null +++ b/examples/azure/realtime/websocket.ts @@ -0,0 +1,60 @@ +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; +import { AzureOpenAI } from 'openai'; +import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity'; +import 'dotenv/config'; + +async function main() { + const cred = new DefaultAzureCredential(); + const scope = 'https://cognitiveservices.azure.com/.default'; + const deploymentName = 'gpt-4o-realtime-preview-1001'; + const azureADTokenProvider = getBearerTokenProvider(cred, scope); + const client = new AzureOpenAI({ + azureADTokenProvider, + apiVersion: '2024-10-01-preview', + deployment: deploymentName, + }); + const rt = await OpenAIRealtimeWebSocket.azure(client); + + // access the underlying `ws.WebSocket` instance + rt.socket.addEventListener('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts new file mode 100644 index 000000000..ae20a1438 --- /dev/null +++ b/examples/azure/realtime/ws.ts @@ -0,0 +1,67 @@ +import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity'; +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; +import { AzureOpenAI } from 'openai'; +import 'dotenv/config'; + +async function main() { + const cred = new DefaultAzureCredential(); + const scope = 'https://cognitiveservices.azure.com/.default'; + const deploymentName = 'gpt-4o-realtime-preview-1001'; + const azureADTokenProvider = getBearerTokenProvider(cred, scope); + const client = new AzureOpenAI({ + azureADTokenProvider, + apiVersion: '2024-10-01-preview', + deployment: deploymentName, + }); + const rt = await OpenAIRealtimeWS.azure(client); + + // access the underlying `ws.WebSocket` instance + rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a 
couple paragraphs!' }], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.on('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/examples/package.json b/examples/package.json index b8c34ac45..70ec2c523 100644 --- a/examples/package.json +++ b/examples/package.json @@ -7,6 +7,7 @@ "private": true, "dependencies": { "@azure/identity": "^4.2.0", + "dotenv": "^16.4.7", "express": "^4.18.2", "next": "^14.1.1", "openai": "file:..", diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts index 4bbe85e5d..bba140800 100644 --- a/examples/realtime/ws.ts +++ b/examples/realtime/ws.ts @@ -9,7 +9,7 @@ async function main() { rt.send({ type: 'session.update', session: { - modalities: ['foo'] as any, + modalities: ['text'], model: 'gpt-4o-realtime-preview', }, }); diff --git a/src/beta/realtime/internal-base.ts b/src/beta/realtime/internal-base.ts index 391d69911..b704812ee 100644 --- a/src/beta/realtime/internal-base.ts +++ b/src/beta/realtime/internal-base.ts @@ -1,6 +1,7 @@ import { RealtimeClientEvent, RealtimeServerEvent, ErrorEvent } from '../../resources/beta/realtime/realtime'; import { EventEmitter } from '../../lib/EventEmitter'; import { OpenAIError } from '../../error'; +import OpenAI, { AzureOpenAI } from '../../index'; export class OpenAIRealtimeError extends OpenAIError { /** @@ -73,11 +74,20 @@ export abstract class OpenAIRealtimeEmitter extends EventEmitter } } -export function buildRealtimeURL(props: { baseURL: string; model: string 
}): URL { - const path = '/realtime'; +export function isAzure(client: Pick): client is AzureOpenAI { + return client instanceof AzureOpenAI; +} - const url = new URL(props.baseURL + (props.baseURL.endsWith('/') ? path.slice(1) : path)); +export function buildRealtimeURL(client: Pick, model: string): URL { + const path = '/realtime'; + const baseURL = client.baseURL; + const url = new URL(baseURL + (baseURL.endsWith('/') ? path.slice(1) : path)); url.protocol = 'wss'; - url.searchParams.set('model', props.model); + if (isAzure(client)) { + url.searchParams.set('api-version', client.apiVersion); + url.searchParams.set('deployment', model); + } else { + url.searchParams.set('model', model); + } return url; } diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts index e0853779d..349cf5760 100644 --- a/src/beta/realtime/websocket.ts +++ b/src/beta/realtime/websocket.ts @@ -1,8 +1,8 @@ -import { OpenAI } from '../../index'; +import { AzureOpenAI, OpenAI } from '../../index'; import { OpenAIError } from '../../error'; import * as Core from '../../core'; import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; -import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; +import { OpenAIRealtimeEmitter, buildRealtimeURL, isAzure } from './internal-base'; interface MessageEvent { data: string; @@ -26,6 +26,11 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { props: { model: string; dangerouslyAllowBrowser?: boolean; + /** + * Callback to mutate the URL, needed for Azure. 
+ * @internal + */ + onURL?: (url: URL) => void; }, client?: Pick, ) { @@ -44,11 +49,13 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { client ??= new OpenAI({ dangerouslyAllowBrowser }); - this.url = buildRealtimeURL({ baseURL: client.baseURL, model: props.model }); + this.url = buildRealtimeURL(client, props.model); + props.onURL?.(this.url); + // @ts-ignore this.socket = new WebSocket(this.url, [ 'realtime', - `openai-insecure-api-key.${client.apiKey}`, + ...(isAzure(client) ? [] : [`openai-insecure-api-key.${client.apiKey}`]), 'openai-beta.realtime-v1', ]); @@ -77,6 +84,45 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { this.socket.addEventListener('error', (event: any) => { this._onError(null, event.message, null); }); + + if (isAzure(client)) { + if (this.url.searchParams.get('Authorization') !== null) { + this.url.searchParams.set('Authorization', ''); + } else { + this.url.searchParams.set('api-key', ''); + } + } + } + + static async azure( + client: AzureOpenAI, + options: { deploymentName?: string; dangerouslyAllowBrowser?: boolean } = {}, + ): Promise { + const token = await client._getAzureADToken(); + function onURL(url: URL) { + if (client.apiKey !== '') { + url.searchParams.set('api-key', client.apiKey); + } else { + if (token) { + url.searchParams.set('Authorization', `Bearer ${token}`); + } else { + throw new Error('AzureOpenAI is not instantiated correctly. No API key or token provided.'); + } + } + } + const deploymentName = options.deploymentName ?? client.deploymentName; + if (!deploymentName) { + throw new Error('No deployment name provided'); + } + const { dangerouslyAllowBrowser } = options; + return new OpenAIRealtimeWebSocket( + { + model: deploymentName, + onURL, + ...(dangerouslyAllowBrowser ? 
{ dangerouslyAllowBrowser } : {}), + }, + client, + ); } send(event: RealtimeClientEvent) { diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts index 631a36cd2..51339089c 100644 --- a/src/beta/realtime/ws.ts +++ b/src/beta/realtime/ws.ts @@ -1,7 +1,7 @@ import * as WS from 'ws'; -import { OpenAI } from '../../index'; +import { AzureOpenAI, OpenAI } from '../../index'; import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; -import { OpenAIRealtimeEmitter, buildRealtimeURL } from './internal-base'; +import { OpenAIRealtimeEmitter, buildRealtimeURL, isAzure } from './internal-base'; export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { url: URL; @@ -14,12 +14,12 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { super(); client ??= new OpenAI(); - this.url = buildRealtimeURL({ baseURL: client.baseURL, model: props.model }); + this.url = buildRealtimeURL(client, props.model); this.socket = new WS.WebSocket(this.url, { ...props.options, headers: { ...props.options?.headers, - Authorization: `Bearer ${client.apiKey}`, + ...(isAzure(client) ? {} : { Authorization: `Bearer ${client.apiKey}` }), 'OpenAI-Beta': 'realtime=v1', }, }); @@ -51,6 +51,20 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { }); } + static async azure( + client: AzureOpenAI, + options: { deploymentName?: string; options?: WS.ClientOptions | undefined } = {}, + ): Promise { + const deploymentName = options.deploymentName ?? 
client.deploymentName; + if (!deploymentName) { + throw new Error('No deployment name provided'); + } + return new OpenAIRealtimeWS( + { model: deploymentName, options: { headers: await getAzureHeaders(client) } }, + client, + ); + } + send(event: RealtimeClientEvent) { try { this.socket.send(JSON.stringify(event)); @@ -67,3 +81,16 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { } } } + +async function getAzureHeaders(client: AzureOpenAI) { + if (client.apiKey !== '') { + return { 'api-key': client.apiKey }; + } else { + const token = await client._getAzureADToken(); + if (token) { + return { Authorization: `Bearer ${token}` }; + } else { + throw new Error('AzureOpenAI is not instantiated correctly. No API key or token provided.'); + } + } +} diff --git a/src/index.ts b/src/index.ts index 944def00f..3de224d90 100644 --- a/src/index.ts +++ b/src/index.ts @@ -491,7 +491,7 @@ export interface AzureClientOptions extends ClientOptions { /** API Client for interfacing with the Azure OpenAI API. */ export class AzureOpenAI extends OpenAI { private _azureADTokenProvider: (() => Promise) | undefined; - private _deployment: string | undefined; + deploymentName: string | undefined; apiVersion: string = ''; /** * API Client for interfacing with the Azure OpenAI API. 
@@ -574,7 +574,7 @@ export class AzureOpenAI extends OpenAI { this._azureADTokenProvider = azureADTokenProvider; this.apiVersion = apiVersion; - this._deployment = deployment; + this.deploymentName = deployment; } override buildRequest( @@ -589,7 +589,7 @@ export class AzureOpenAI extends OpenAI { if (!Core.isObj(options.body)) { throw new Error('Expected request body to be an object'); } - const model = this._deployment || options.body['model']; + const model = this.deploymentName || options.body['model']; if (model !== undefined && !this.baseURL.includes('/deployments')) { options.path = `/deployments/${model}${options.path}`; } @@ -597,7 +597,7 @@ export class AzureOpenAI extends OpenAI { return super.buildRequest(options, props); } - private async _getAzureADToken(): Promise { + async _getAzureADToken(): Promise { if (typeof this._azureADTokenProvider === 'function') { const token = await this._azureADTokenProvider(); if (!token || typeof token !== 'string') { From 6f89573f9b334960195b074e17ad70df32329e8e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 29 Jan 2025 15:45:55 +0000 Subject: [PATCH 396/533] release: 4.81.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d140407b9..de35570a8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.80.1" + ".": "4.81.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e4d4d73b7..b24c0869d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.81.0 (2025-01-29) + +Full Changelog: [v4.80.1...v4.81.0](https://github.com/openai/openai-node/compare/v4.80.1...v4.81.0) + +### Features + +* **azure:** Realtime API support 
([#1287](https://github.com/openai/openai-node/issues/1287)) ([fe090c0](https://github.com/openai/openai-node/commit/fe090c0a57570217eb0b431e2cce40bf61de2b75)) + ## 4.80.1 (2025-01-24) Full Changelog: [v4.80.0...v4.80.1](https://github.com/openai/openai-node/compare/v4.80.0...v4.80.1) diff --git a/jsr.json b/jsr.json index e2ecad87f..18d000862 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.80.1", + "version": "4.81.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 497c7fae9..07faa0019 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.80.1", + "version": "4.81.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 7d762daed..3b4d4eee5 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.80.1'; // x-release-please-version +export const VERSION = '4.81.0'; // x-release-please-version From a0519f5882e4ed1df388f5c7014a6e0d408cdc40 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Fri, 31 Jan 2025 04:26:47 -0600 Subject: [PATCH 397/533] fix(examples/realtime): remove duplicate `session.update` call (#1293) --- examples/azure/realtime/ws.ts | 7 ------- examples/realtime/ws.ts | 7 ------- 2 files changed, 14 deletions(-) diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts index ae20a1438..6ab7b742a 100644 --- a/examples/azure/realtime/ws.ts +++ b/examples/azure/realtime/ws.ts @@ -25,13 +25,6 @@ async function main() { model: 'gpt-4o-realtime-preview', }, }); - rt.send({ - type: 'session.update', - session: { - modalities: ['text'], - model: 'gpt-4o-realtime-preview', - }, - }); rt.send({ type: 'conversation.item.create', diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts index bba140800..08c6fbcb6 100644 --- 
a/examples/realtime/ws.ts +++ b/examples/realtime/ws.ts @@ -13,13 +13,6 @@ async function main() { model: 'gpt-4o-realtime-preview', }, }); - rt.send({ - type: 'session.update', - session: { - modalities: ['text'], - model: 'gpt-4o-realtime-preview', - }, - }); rt.send({ type: 'conversation.item.create', From 608200f7cfdeca079a9a6457f9c306baf96c4712 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 19:08:33 +0000 Subject: [PATCH 398/533] feat(api): add o3-mini (#1295) fix(types): correct metadata type + other fixes --- .stats.yml | 2 +- api.md | 1 + src/index.ts | 1 + src/resources/audio/transcriptions.ts | 4 +- src/resources/batches.ts | 20 ++-- src/resources/beta/assistants.ts | 42 +++++--- src/resources/beta/realtime/realtime.ts | 89 ++++++++++++++-- src/resources/beta/realtime/sessions.ts | 35 ++++-- src/resources/beta/threads/messages.ts | 31 +++--- src/resources/beta/threads/runs/runs.ts | 39 ++++--- src/resources/beta/threads/runs/steps.ts | 11 +- src/resources/beta/threads/threads.ts | 100 +++++++++++------- .../beta/vector-stores/vector-stores.ts | 31 +++--- src/resources/chat/chat.ts | 2 + src/resources/chat/completions.ts | 14 ++- src/resources/shared.ts | 10 ++ src/resources/uploads/uploads.ts | 2 +- tests/api-resources/beta/assistants.test.ts | 6 +- .../beta/threads/messages.test.ts | 2 +- .../beta/threads/runs/runs.test.ts | 4 +- .../beta/threads/threads.test.ts | 18 ++-- 21 files changed, 320 insertions(+), 144 deletions(-) diff --git a/.stats.yml b/.stats.yml index d518bac58..e49b5c56e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml 
diff --git a/api.md b/api.md index 33ab95ef6..516188b20 100644 --- a/api.md +++ b/api.md @@ -5,6 +5,7 @@ Types: - ErrorObject - FunctionDefinition - FunctionParameters +- Metadata - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText diff --git a/src/index.ts b/src/index.ts index 3de224d90..f860579d3 100644 --- a/src/index.ts +++ b/src/index.ts @@ -451,6 +451,7 @@ export declare namespace OpenAI { export type ErrorObject = API.ErrorObject; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; + export type Metadata = API.Metadata; export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; export type ResponseFormatText = API.ResponseFormatText; diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 0b6da4620..6d0a07e1e 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -166,8 +166,8 @@ export interface TranscriptionCreateParams< /** * The language of the input audio. Supplying the input language in - * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - * improve accuracy and latency. + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. 
*/ language?: string; diff --git a/src/resources/batches.ts b/src/resources/batches.ts index ec5ca6331..aadda83a6 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -4,6 +4,7 @@ import { APIResource } from '../resource'; import { isRequestOptions } from '../core'; import * as Core from '../core'; import * as BatchesAPI from './batches'; +import * as Shared from './shared'; import { CursorPage, type CursorPageParams } from '../pagination'; export class Batches extends APIResource { @@ -138,11 +139,13 @@ export interface Batch { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The ID of the file containing the outputs of successfully executed requests. @@ -237,9 +240,14 @@ export interface BatchCreateParams { input_file_id: string; /** - * Optional custom metadata for the batch. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
*/ - metadata?: Record | null; + metadata?: Shared.Metadata | null; } export interface BatchListParams extends CursorPageParams {} diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 0e657b1d4..69a5db520 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -111,11 +111,13 @@ export interface Assistant { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * ID of the model to use. You can use the @@ -1118,11 +1120,13 @@ export interface AssistantCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the assistant. The maximum length is 256 characters. @@ -1242,12 +1246,14 @@ export namespace AssistantCreateParams { file_ids?: Array; /** - * Set of 16 key-value pairs that can be attached to a vector store. 
This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown; + metadata?: Shared.Metadata | null; } } } @@ -1267,11 +1273,13 @@ export interface AssistantUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * ID of the model to use. You can use the diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 0fb66eb49..c666221e1 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -2,6 +2,7 @@ import { APIResource } from '../../../resource'; import * as RealtimeAPI from './realtime'; +import * as Shared from '../../shared'; import * as SessionsAPI from './sessions'; import { Session as SessionsAPISession, @@ -741,9 +742,38 @@ export interface RealtimeResponse { id?: string; /** - * Developer-provided string key-value pairs associated with this response. 
+ * Which conversation the response is added to, determined by the `conversation` + * field in the `response.create` event. If `auto`, the response will be added to + * the default conversation and the value of `conversation_id` will be an id like + * `conv_1234`. If `none`, the response will not be added to any conversation and + * the value of `conversation_id` will be `null`. If responses are being triggered + * by server VAD, the response will be added to the default conversation, thus the + * `conversation_id` will be an id like `conv_1234`. */ - metadata?: unknown | null; + conversation_id?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls, that was used in this response. + */ + max_output_tokens?: number | 'inf'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The set of modalities the model used to respond. If there are multiple + * modalities, the model will pick one, for example if `modalities` is + * `["text", "audio"]`, the model could be responding in either text or audio. + */ + modalities?: Array<'text' | 'audio'>; /** * The object type, must be `realtime.response`. @@ -755,6 +785,11 @@ export interface RealtimeResponse { */ output?: Array; + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** * The final status of the response (`completed`, `cancelled`, `failed`, or * `incomplete`). 
@@ -766,6 +801,11 @@ export interface RealtimeResponse { */ status_details?: RealtimeResponseStatus; + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + /** * Usage statistics for the Response, this will correspond to billing. A Realtime * API session will maintain a conversation context and append new Items to the @@ -773,6 +813,12 @@ export interface RealtimeResponse { * become the input for later turns. */ usage?: RealtimeResponseUsage; + + /** + * The voice the model used to respond. Current voice options are `alloy`, `ash`, + * `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + */ + voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } /** @@ -1320,11 +1366,13 @@ export namespace ResponseCreateEvent { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maximum of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The set of modalities the model can respond with. To disable audio, set this to @@ -1716,8 +1764,11 @@ export namespace SessionUpdateEvent { * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. 
+ * asynchronously through + * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as rough guidance rather than the representation + * understood by the model. The client can optionally set the language and prompt + * for transcription, these fields will be passed to the Whisper API. */ input_audio_transcription?: Session.InputAudioTranscription; @@ -1801,15 +1852,33 @@ export namespace SessionUpdateEvent { * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as rough guidance rather than the representation + * understood by the model. The client can optionally set the language and prompt + * for transcription, these fields will be passed to the Whisper API. */ export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + /** * The model to use for transcription, `whisper-1` is the only currently supported * model. */ model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. The + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + * should match the audio language. 
+ */ + prompt?: string; } export interface Tool { diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index 68c48db59..d2afa25b1 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -203,7 +203,7 @@ export interface SessionCreateResponse { /** * Ephemeral key returned by the API. */ - client_secret?: SessionCreateResponse.ClientSecret; + client_secret: SessionCreateResponse.ClientSecret; /** * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. @@ -292,14 +292,14 @@ export namespace SessionCreateResponse { * Timestamp for when the token expires. Currently, all tokens expire after one * minute. */ - expires_at?: number; + expires_at: number; /** * Ephemeral key usable in client environments to authenticate connections to the * Realtime API. Use this in client-side environments rather than a standard API * token, which should only be used server-side. */ - value?: string; + value: string; } /** @@ -385,8 +385,11 @@ export interface SessionCreateParams { * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as rough guidance rather than the representation + * understood by the model. The client can optionally set the language and prompt + * for transcription, these fields will be passed to the Whisper API. 
*/ input_audio_transcription?: SessionCreateParams.InputAudioTranscription; @@ -470,15 +473,33 @@ export namespace SessionCreateParams { * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as rough guidance rather than the representation + * understood by the model. The client can optionally set the language and prompt + * for transcription, these fields will be passed to the Whisper API. */ export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + /** * The model to use for transcription, `whisper-1` is the only currently supported * model. */ model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. The + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + * should match the audio language. 
+ */ + prompt?: string; } export interface Tool { diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 8124f56cd..29fd2b29f 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -3,6 +3,7 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; +import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -407,11 +408,13 @@ export interface Message { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread.message`. @@ -660,11 +663,13 @@ export interface MessageCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
*/ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace MessageCreateParams { @@ -693,11 +698,13 @@ export namespace MessageCreateParams { export interface MessageUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export interface MessageListParams extends CursorPageParams { diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 814ad3e89..84ba7b63c 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -8,6 +8,7 @@ import { AssistantStream, RunCreateParamsBaseStream } from '../../../../lib/Assi import { sleep } from '../../../../core'; import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStream'; import * as RunsAPI from './runs'; +import * as Shared from '../../../shared'; import * as AssistantsAPI from '../../assistants'; import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; @@ -415,11 +416,13 @@ export interface Run { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. 
+ * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The model that the @@ -705,10 +708,12 @@ export interface RunCreateParamsBase { /** * Body param: Set of 16 key-value pairs that can be attached to an object. This * can be useful for storing additional information about the object in a - * structured format. Keys can be a maximum of 64 characters long and values can be - * a maxium of 512 characters long. + * structured format, and querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * Body param: The ID of the @@ -823,11 +828,13 @@ export namespace RunCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace AdditionalMessage { @@ -898,11 +905,13 @@ export interface RunCreateParamsStreaming extends RunCreateParamsBase { export interface RunUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. 
+ * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export interface RunListParams extends CursorPageParams { diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index 6c6722b62..c491b4e83 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../../../resource'; import { isRequestOptions } from '../../../../core'; import * as Core from '../../../../core'; import * as StepsAPI from './steps'; +import * as Shared from '../../../shared'; import { CursorPage, type CursorPageParams } from '../../../../pagination'; export class Steps extends APIResource { @@ -515,11 +516,13 @@ export interface RunStep { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread.run.step`. 
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 453d8fa10..3f69c6e60 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -250,11 +250,13 @@ export interface Thread { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread`. @@ -322,11 +324,13 @@ export interface ThreadCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -361,11 +365,13 @@ export namespace ThreadCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. 
+ * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace Message { @@ -447,12 +453,14 @@ export namespace ThreadCreateParams { file_ids?: Array; /** - * Set of 16 key-value pairs that can be attached to a vector store. This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown; + metadata?: Shared.Metadata | null; } } } @@ -461,11 +469,13 @@ export namespace ThreadCreateParams { export interface ThreadUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
*/ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -549,11 +559,13 @@ export interface ThreadCreateAndRunParamsBase { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -609,7 +621,8 @@ export interface ThreadCreateAndRunParamsBase { temperature?: number | null; /** - * If no thread is provided, an empty thread will be created. + * Options to create a new thread. If no thread is provided when running a request, + * an empty thread will be created. */ thread?: ThreadCreateAndRunParams.Thread; @@ -658,7 +671,8 @@ export interface ThreadCreateAndRunParamsBase { export namespace ThreadCreateAndRunParams { /** - * If no thread is provided, an empty thread will be created. + * Options to create a new thread. If no thread is provided when running a request, + * an empty thread will be created. */ export interface Thread { /** @@ -669,11 +683,13 @@ export namespace ThreadCreateAndRunParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. 
+ * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -708,11 +724,13 @@ export namespace ThreadCreateAndRunParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace Message { @@ -794,12 +812,14 @@ export namespace ThreadCreateAndRunParams { file_ids?: Array; /** - * Set of 16 key-value pairs that can be attached to a vector store. This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
*/ - metadata?: unknown; + metadata?: Shared.Metadata | null; } } } diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts index cbff2d562..8438b79da 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/beta/vector-stores/vector-stores.ts @@ -3,6 +3,7 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; +import * as Shared from '../../shared'; import * as FileBatchesAPI from './file-batches'; import { FileBatchCreateParams, @@ -187,11 +188,13 @@ export interface VectorStore { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The name of the vector store. @@ -300,11 +303,13 @@ export interface VectorStoreCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the vector store. 
@@ -338,11 +343,13 @@ export interface VectorStoreUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the vector store. diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 2230b19bd..d4a18929c 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -46,6 +46,8 @@ export class Chat extends APIResource { } export type ChatModel = + | 'o3-mini' + | 'o3-mini-2025-01-31' | 'o1' | 'o1-2024-12-17' | 'o1-preview' diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 683eb5ed4..d2de11458 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1012,10 +1012,14 @@ export interface ChatCompletionCreateParamsBase { max_tokens?: number | null; /** - * Developer-defined tags and values used for filtering completions in the - * [dashboard](https://platform.openai.com/chat-completions). + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: Record | null; + metadata?: Shared.Metadata | null; /** * Output types that you would like the model to generate for this request. 
Most @@ -1109,9 +1113,9 @@ export interface ChatCompletionCreateParamsBase { * utilize scale tier credits until they are exhausted. * - If set to 'auto', and the Project is not Scale tier enabled, the request will * be processed using the default service tier with a lower uptime SLA and no - * latency guarentee. + * latency guarantee. * - If set to 'default', the request will be processed using the default service - * tier with a lower uptime SLA and no latency guarentee. + * tier with a lower uptime SLA and no latency guarantee. * - When not set, the default behavior is 'auto'. */ service_tier?: 'auto' | 'default' | null; diff --git a/src/resources/shared.ts b/src/resources/shared.ts index f44fda8a7..3bb11582f 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -55,6 +55,16 @@ export interface FunctionDefinition { */ export type FunctionParameters = Record; +/** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ +export type Metadata = Record; + export interface ResponseFormatJSONObject { /** * The type of response format being defined: `json_object` diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index 8491d0fe2..bfe752cd7 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -113,7 +113,7 @@ export interface Upload { status: 'pending' | 'completed' | 'cancelled' | 'expired'; /** - * The ready File object after the Upload is completed. + * The `File` object represents a document that has been uploaded to OpenAI. 
*/ file?: FilesAPI.FileObject | null; } diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index a64465c77..88a10ba8f 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -25,7 +25,7 @@ describe('resource assistants', () => { model: 'gpt-4o', description: 'description', instructions: 'instructions', - metadata: {}, + metadata: { foo: 'string' }, name: 'name', response_format: 'auto', temperature: 1, @@ -33,7 +33,9 @@ describe('resource assistants', () => { code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], + vector_stores: [ + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, + ], }, }, tools: [{ type: 'code_interpreter' }], diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index c1f5f7b6e..e125edd84 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -28,7 +28,7 @@ describe('resource messages', () => { content: 'string', role: 'user', attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], - metadata: {}, + metadata: { foo: 'string' }, }); }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 4fd8261ac..9b728403f 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -30,13 +30,13 @@ describe('resource runs', () => { content: 'string', role: 'user', attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], - metadata: {}, + metadata: { foo: 'string' }, }, ], instructions: 'instructions', max_completion_tokens: 256, max_prompt_tokens: 256, - metadata: {}, + metadata: { 
foo: 'string' }, model: 'gpt-4o', parallel_tool_calls: true, response_format: 'auto', diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index aba266316..f26d6ec44 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -37,15 +37,17 @@ describe('resource threads', () => { content: 'string', role: 'user', attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], - metadata: {}, + metadata: { foo: 'string' }, }, ], - metadata: {}, + metadata: { foo: 'string' }, tool_resources: { code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], + vector_stores: [ + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, + ], }, }, }, @@ -118,7 +120,7 @@ describe('resource threads', () => { instructions: 'instructions', max_completion_tokens: 256, max_prompt_tokens: 256, - metadata: {}, + metadata: { foo: 'string' }, model: 'gpt-4o', parallel_tool_calls: true, response_format: 'auto', @@ -130,15 +132,17 @@ describe('resource threads', () => { content: 'string', role: 'user', attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], - metadata: {}, + metadata: { foo: 'string' }, }, ], - metadata: {}, + metadata: { foo: 'string' }, tool_resources: { code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], - vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }], + vector_stores: [ + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, + ], }, }, }, From 145ff671d3a8111c81497f6bc9cd0cb5053a6cb0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 19:09:24 +0000 Subject: 
[PATCH 399/533] release: 4.82.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index de35570a8..b2ee58e08 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.81.0" + ".": "4.82.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index b24c0869d..7565cb01a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.82.0 (2025-01-31) + +Full Changelog: [v4.81.0...v4.82.0](https://github.com/openai/openai-node/compare/v4.81.0...v4.82.0) + +### Features + +* **api:** add o3-mini ([#1295](https://github.com/openai/openai-node/issues/1295)) ([378e2f7](https://github.com/openai/openai-node/commit/378e2f7af62c570adb4c7644a4d49576b698de41)) + + +### Bug Fixes + +* **examples/realtime:** remove duplicate `session.update` call ([#1293](https://github.com/openai/openai-node/issues/1293)) ([ad800b4](https://github.com/openai/openai-node/commit/ad800b4f9410c6838994c24a3386ea708717f72b)) +* **types:** correct metadata type + other fixes ([378e2f7](https://github.com/openai/openai-node/commit/378e2f7af62c570adb4c7644a4d49576b698de41)) + ## 4.81.0 (2025-01-29) Full Changelog: [v4.80.1...v4.81.0](https://github.com/openai/openai-node/compare/v4.80.1...v4.81.0) diff --git a/jsr.json b/jsr.json index 18d000862..7569332ce 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.81.0", + "version": "4.82.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 07faa0019..42e00822d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.81.0", + "version": "4.82.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": 
"dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 3b4d4eee5..07241a8cf 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.81.0'; // x-release-please-version +export const VERSION = '4.82.0'; // x-release-please-version From 7cf2a8571fb3c40ce3e67759af314e37bc3467e0 Mon Sep 17 00:00:00 2001 From: Deyaaeldeen Almahallawi Date: Mon, 3 Feb 2025 11:22:06 -0600 Subject: [PATCH 400/533] fix(azure/audio): use model param for deployments (#1297) --- src/core.ts | 2 ++ src/index.ts | 2 +- src/resources/audio/transcriptions.ts | 5 ++++- src/resources/audio/translations.ts | 5 ++++- tests/lib/azure.test.ts | 10 ++++++---- 5 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/core.ts b/src/core.ts index 3d2d029a5..23d19b5bd 100644 --- a/src/core.ts +++ b/src/core.ts @@ -814,6 +814,7 @@ export type RequestOptions< signal?: AbortSignal | undefined | null; idempotencyKey?: string; + __metadata?: Record; __binaryRequest?: boolean | undefined; __binaryResponse?: boolean | undefined; __streamClass?: typeof Stream; @@ -836,6 +837,7 @@ const requestOptionsKeys: KeysEnum = { signal: true, idempotencyKey: true, + __metadata: true, __binaryRequest: true, __binaryResponse: true, __streamClass: true, diff --git a/src/index.ts b/src/index.ts index f860579d3..f4e940af8 100644 --- a/src/index.ts +++ b/src/index.ts @@ -590,7 +590,7 @@ export class AzureOpenAI extends OpenAI { if (!Core.isObj(options.body)) { throw new Error('Expected request body to be an object'); } - const model = this.deploymentName || options.body['model']; + const model = this.deploymentName || options.body['model'] || options.__metadata?.['model']; if (model !== undefined && !this.baseURL.includes('/deployments')) { options.path = `/deployments/${model}${options.path}`; } diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 6d0a07e1e..d0e671243 100644 --- a/src/resources/audio/transcriptions.ts +++ 
b/src/resources/audio/transcriptions.ts @@ -25,7 +25,10 @@ export class Transcriptions extends APIResource { body: TranscriptionCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/audio/transcriptions', Core.multipartFormRequestOptions({ body, ...options })); + return this._client.post( + '/audio/transcriptions', + Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), + ); } } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index c6bf7c870..0621deecb 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -26,7 +26,10 @@ export class Translations extends APIResource { body: TranslationCreateParams, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/audio/translations', Core.multipartFormRequestOptions({ body, ...options })); + return this._client.post( + '/audio/translations', + Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), + ); } } diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts index 0e3c2c5a3..430efbe57 100644 --- a/tests/lib/azure.test.ts +++ b/tests/lib/azure.test.ts @@ -495,21 +495,23 @@ describe('azure request building', () => { ); }); - test('Audio translations is not handled', async () => { + test('handles audio translations', async () => { const { url } = (await client.audio.translations.create({ model: deployment, file: { url: 'https://example.com/', blob: () => 0 as any }, })) as any; - expect(url).toStrictEqual(`https://example.com/openai/audio/translations?api-version=${apiVersion}`); + expect(url).toStrictEqual( + `https://example.com/openai/deployments/${deployment}/audio/translations?api-version=${apiVersion}`, + ); }); - test('Audio transcriptions is not handled', async () => { + test('handles audio transcriptions', async () => { const { url } = (await client.audio.transcriptions.create({ model: 
deployment, file: { url: 'https://example.com/', blob: () => 0 as any }, })) as any; expect(url).toStrictEqual( - `https://example.com/openai/audio/transcriptions?api-version=${apiVersion}`, + `https://example.com/openai/deployments/${deployment}/audio/transcriptions?api-version=${apiVersion}`, ); }); From 29a86274c3965826e132373fccbea430efb3bacd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 18:39:40 +0000 Subject: [PATCH 401/533] feat(client): send `X-Stainless-Timeout` header (#1299) --- src/core.ts | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/core.ts b/src/core.ts index 23d19b5bd..6578c0781 100644 --- a/src/core.ts +++ b/src/core.ts @@ -315,6 +315,7 @@ export abstract class APIClient { options: FinalRequestOptions, { retryCount = 0 }: { retryCount?: number } = {}, ): { req: RequestInit; url: string; timeout: number } { + options = { ...options }; const { method, path, query, headers: headers = {} } = options; const body = @@ -327,9 +328,9 @@ export abstract class APIClient { const url = this.buildURL(path!, query); if ('timeout' in options) validatePositiveInteger('timeout', options.timeout); - const timeout = options.timeout ?? this.timeout; + options.timeout = options.timeout ?? this.timeout; const httpAgent = options.httpAgent ?? this.httpAgent ?? getDefaultAgent(url); - const minAgentTimeout = timeout + 1000; + const minAgentTimeout = options.timeout + 1000; if ( typeof (httpAgent as any)?.options?.timeout === 'number' && minAgentTimeout > ((httpAgent as any).options.timeout ?? 0) @@ -358,7 +359,7 @@ export abstract class APIClient { signal: options.signal ?? 
null, }; - return { req, url, timeout }; + return { req, url, timeout: options.timeout }; } private buildHeaders({ @@ -386,15 +387,22 @@ export abstract class APIClient { delete reqHeaders['content-type']; } - // Don't set the retry count header if it was already set or removed through default headers or by the - // caller. We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to - // account for the removal case. + // Don't set theses headers if they were already set or removed through default headers or by the caller. + // We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to account + // for the removal case. if ( getHeader(defaultHeaders, 'x-stainless-retry-count') === undefined && getHeader(headers, 'x-stainless-retry-count') === undefined ) { reqHeaders['x-stainless-retry-count'] = String(retryCount); } + if ( + getHeader(defaultHeaders, 'x-stainless-timeout') === undefined && + getHeader(headers, 'x-stainless-timeout') === undefined && + options.timeout + ) { + reqHeaders['x-stainless-timeout'] = String(options.timeout); + } this.validateHeaders(reqHeaders, headers); From bcf459fb5594d3d7198d95c5569cac4aa6bd483e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 11:26:36 +0000 Subject: [PATCH 402/533] fix(api/types): correct audio duration & role types (#1300) --- .stats.yml | 2 +- api.md | 1 + src/lib/ChatCompletionStream.ts | 3 +- src/resources/audio/transcriptions.ts | 2 +- src/resources/audio/translations.ts | 2 +- src/resources/beta/realtime/realtime.ts | 79 +++++++++++++++++++++++-- src/resources/chat/completions.ts | 4 +- 7 files changed, 83 insertions(+), 10 deletions(-) diff --git a/.stats.yml b/.stats.yml index e49b5c56e..df7877dfd 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml diff --git a/api.md b/api.md index 516188b20..01854a8e0 100644 --- a/api.md +++ b/api.md @@ -229,6 +229,7 @@ Types: - ConversationItemInputAudioTranscriptionFailedEvent - ConversationItemTruncateEvent - ConversationItemTruncatedEvent +- ConversationItemWithReference - ErrorEvent - InputAudioBufferAppendEvent - InputAudioBufferClearEvent diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index a88f8a23b..6c846f70b 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -12,6 +12,7 @@ import { type ChatCompletionCreateParams, type ChatCompletionCreateParamsStreaming, type ChatCompletionCreateParamsBase, + type ChatCompletionRole, } from '../resources/chat/completions'; import { AbstractChatCompletionRunner, @@ -797,7 +798,7 @@ export namespace ChatCompletionSnapshot { /** * The role of the author of this message. */ - role?: 'system' | 'user' | 'assistant' | 'function' | 'tool'; + role?: ChatCompletionRole; } export namespace Message { diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index d0e671243..6fbe96b58 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -106,7 +106,7 @@ export interface TranscriptionVerbose { /** * The duration of the input audio. */ - duration: string; + duration: number; /** * The language of the input audio. diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 0621deecb..dac519ede 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -41,7 +41,7 @@ export interface TranslationVerbose { /** * The duration of the input audio. 
*/ - duration: string; + duration: number; /** * The language of the output translation (always `english`). diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index c666221e1..e46dcdaaf 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -439,6 +439,76 @@ export interface ConversationItemTruncatedEvent { type: 'conversation.item.truncated'; } +/** + * The item to add to the conversation. + */ +export interface ConversationItemWithReference { + /** + * For an item of type (`message` | `function_call` | `function_call_output`) this + * field allows the client to assign the unique ID of the item. It is not required + * because the server will generate one if not provided. + * + * For an item of type `item_reference`, this field is required and is a reference + * to any item that has previously existed in the conversation. + */ + id?: string; + + /** + * The arguments of the function call (for `function_call` items). + */ + arguments?: string; + + /** + * The ID of the function call (for `function_call` and `function_call_output` + * items). If passed on a `function_call_output` item, the server will check that a + * `function_call` item with the same ID exists in the conversation history. + */ + call_id?: string; + + /** + * The content of the message, applicable for `message` items. + * + * - Message items of role `system` support only `input_text` content + * - Message items of role `user` support `input_text` and `input_audio` content + * - Message items of role `assistant` support `text` content. + */ + content?: Array; + + /** + * The name of the function being called (for `function_call` items). + */ + name?: string; + + /** + * Identifier for the API object being returned - always `realtime.item`. + */ + object?: 'realtime.item'; + + /** + * The output of the function call (for `function_call_output` items). 
+ */ + output?: string; + + /** + * The role of the message sender (`user`, `assistant`, `system`), only applicable + * for `message` items. + */ + role?: 'user' | 'assistant' | 'system'; + + /** + * The status of the item (`completed`, `incomplete`). These have no effect on the + * conversation, but are accepted for consistency with the + * `conversation.item.created` event. + */ + status?: 'completed' | 'incomplete'; + + /** + * The type of the item (`message`, `function_call`, `function_call_output`, + * `item_reference`). + */ + type?: 'message' | 'function_call' | 'function_call_output' | 'item_reference'; +} + /** * Returned when an error occurs, which could be a client problem or a server * problem. Most errors are recoverable and the session will stay open, we @@ -1336,11 +1406,12 @@ export namespace ResponseCreateEvent { conversation?: (string & {}) | 'auto' | 'none'; /** - * Input items to include in the prompt for the model. Creates a new context for - * this response, without including the default conversation. Can include - * references to items from the default conversation. + * Input items to include in the prompt for the model. Using this field creates a + * new context for this Response instead of using the default conversation. An + * empty array `[]` will clear the context for this Response. Note that this can + * include references to items from the default conversation. */ - input?: Array; + input?: Array; /** * The default system instructions (i.e. system message) prepended to model calls. diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index d2de11458..55b008cf0 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -371,7 +371,7 @@ export namespace ChatCompletionChunk { /** * The role of the author of this message. 
*/ - role?: 'system' | 'user' | 'assistant' | 'tool'; + role?: 'developer' | 'system' | 'user' | 'assistant' | 'tool'; tool_calls?: Array; } @@ -756,7 +756,7 @@ export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high'; /** * The role of the author of a message */ -export type ChatCompletionRole = 'system' | 'user' | 'assistant' | 'tool' | 'function'; +export type ChatCompletionRole = 'developer' | 'system' | 'user' | 'assistant' | 'tool' | 'function'; /** * Options for streaming response. Only set this when you set `stream: true`. From 41a7ce315f3ee4495ae259d9bbed77701dc52430 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 12:26:03 +0000 Subject: [PATCH 403/533] release: 4.83.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b2ee58e08..6eb0f130e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.82.0" + ".": "4.83.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7565cb01a..f61def5e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.83.0 (2025-02-05) + +Full Changelog: [v4.82.0...v4.83.0](https://github.com/openai/openai-node/compare/v4.82.0...v4.83.0) + +### Features + +* **client:** send `X-Stainless-Timeout` header ([#1299](https://github.com/openai/openai-node/issues/1299)) ([ddfc686](https://github.com/openai/openai-node/commit/ddfc686f43a3420c3adf8dec2e82b4d10a121eb8)) + + +### Bug Fixes + +* **api/types:** correct audio duration & role types ([#1300](https://github.com/openai/openai-node/issues/1300)) ([a955ac2](https://github.com/openai/openai-node/commit/a955ac2bf5bee663d530d0c82b0005bf3ce6fc47)) +* **azure/audio:** use model param for deployments 
([#1297](https://github.com/openai/openai-node/issues/1297)) ([85de382](https://github.com/openai/openai-node/commit/85de382db17cbe5f112650e79d0fc1cc841efbb2)) + ## 4.82.0 (2025-01-31) Full Changelog: [v4.81.0...v4.82.0](https://github.com/openai/openai-node/compare/v4.81.0...v4.82.0) diff --git a/jsr.json b/jsr.json index 7569332ce..6fa05e624 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.82.0", + "version": "4.83.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 42e00822d..bd507e9f8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.82.0", + "version": "4.83.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 07241a8cf..13c764d7d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.82.0'; // x-release-please-version +export const VERSION = '4.83.0'; // x-release-please-version From 2a43456b2e085f79ff3ebebdfa55c65f68dfbe56 Mon Sep 17 00:00:00 2001 From: Minh Anh Date: Wed, 5 Feb 2025 11:29:45 -0800 Subject: [PATCH 404/533] Fix Azure OpenAI client import --- src/beta/realtime/websocket.ts | 2 +- src/beta/realtime/ws.ts | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts index 349cf5760..e8143fdbf 100644 --- a/src/beta/realtime/websocket.ts +++ b/src/beta/realtime/websocket.ts @@ -95,7 +95,7 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { } static async azure( - client: AzureOpenAI, + client: Pick, options: { deploymentName?: string; dangerouslyAllowBrowser?: boolean } = {}, ): Promise { const token = await client._getAzureADToken(); diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts index 51339089c..3f51dfc4b 100644 --- 
a/src/beta/realtime/ws.ts +++ b/src/beta/realtime/ws.ts @@ -52,7 +52,7 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { } static async azure( - client: AzureOpenAI, + client: Pick, options: { deploymentName?: string; options?: WS.ClientOptions | undefined } = {}, ): Promise { const deploymentName = options.deploymentName ?? client.deploymentName; @@ -82,7 +82,7 @@ export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { } } -async function getAzureHeaders(client: AzureOpenAI) { +async function getAzureHeaders(client: Pick) { if (client.apiKey !== '') { return { 'api-key': client.apiKey }; } else { From 27d354a363d3c8dc5056bd28f8f1073757046f48 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 21:25:41 +0000 Subject: [PATCH 405/533] fix(api): add missing reasoning effort + model enums (#1302) --- .stats.yml | 2 +- src/resources/beta/assistants.ts | 51 ++++++++++++++++++- src/resources/beta/threads/runs/runs.ts | 10 ++++ src/resources/chat/completions.ts | 8 +-- tests/api-resources/beta/assistants.test.ts | 1 + .../beta/threads/runs/runs.test.ts | 1 + 6 files changed, 67 insertions(+), 6 deletions(-) diff --git a/.stats.yml b/.stats.yml index df7877dfd..8a5d2c06b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7c699d4503077d06a4a44f52c0c1f902d19a87c766b8be75b97c8dfd484ad4aa.yml diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 69a5db520..0cc63d691 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1133,6 +1133,16 @@ export interface AssistantCreateParams { */ name?: string | null; + /** + * **o1 and o3-mini models only** + * + 
* Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: 'low' | 'medium' | 'high' | null; + /** * Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1288,13 +1298,52 @@ export interface AssistantUpdateParams { * [Model overview](https://platform.openai.com/docs/models) for descriptions of * them. */ - model?: string; + model?: + | (string & {}) + | 'o3-mini' + | 'o3-mini-2025-01-31' + | 'o1' + | 'o1-2024-12-17' + | 'gpt-4o' + | 'gpt-4o-2024-11-20' + | 'gpt-4o-2024-08-06' + | 'gpt-4o-2024-05-13' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; /** * The name of the assistant. The maximum length is 256 characters. */ name?: string | null; + /** + * **o1 and o3-mini models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: 'low' | 'medium' | 'high' | null; + /** * Specifies the format that the model must output. 
Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 84ba7b63c..8ab94cc99 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -731,6 +731,16 @@ export interface RunCreateParamsBase { */ parallel_tool_calls?: boolean; + /** + * Body param: **o1 and o3-mini models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: 'low' | 'medium' | 'high' | null; + /** * Body param: Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 55b008cf0..2586845c3 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -744,14 +744,14 @@ export interface ChatCompletionPredictionContent { } /** - * **o1 models only** + * **o1 and o3-mini models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. 
*/ -export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high'; +export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high' | null; /** * The role of the author of a message @@ -1063,14 +1063,14 @@ export interface ChatCompletionCreateParamsBase { presence_penalty?: number | null; /** - * **o1 models only** + * **o1 and o3-mini models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: ChatCompletionReasoningEffort; + reasoning_effort?: ChatCompletionReasoningEffort | null; /** * An object specifying the format that the model must output. diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index 88a10ba8f..16bc9f942 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -27,6 +27,7 @@ describe('resource assistants', () => { instructions: 'instructions', metadata: { foo: 'string' }, name: 'name', + reasoning_effort: 'low', response_format: 'auto', temperature: 1, tool_resources: { diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 9b728403f..13ae89a00 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -39,6 +39,7 @@ describe('resource runs', () => { metadata: { foo: 'string' }, model: 'gpt-4o', parallel_tool_calls: true, + reasoning_effort: 'low', response_format: 'auto', stream: false, temperature: 1, From f44641236e9f90758c535cc948d5734ae20fd5a5 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 5 Feb 2025 20:33:57 +0000 Subject: [PATCH 406/533] docs(readme): cleanup into multiple files --- README.md | 421 
+++++++++++----------------------------------------- azure.md | 49 ++++++ helpers.md | 122 ++++++++++----- realtime.md | 87 +++++++++++ 4 files changed, 313 insertions(+), 366 deletions(-) create mode 100644 azure.md create mode 100644 realtime.md diff --git a/README.md b/README.md index a1f4bf760..166e35e22 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ import OpenAI from 'jsr:@openai/openai'; The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API. -```js +```ts import OpenAI from 'openai'; const client = new OpenAI({ @@ -80,189 +80,11 @@ async function main() { main(); ``` -If you need to cancel a stream, you can `break` from the loop -or call `stream.controller.abort()`. - -## Realtime API beta - -The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. - -The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. Server events confirm when audio responses have completed, or when a text response from the model has been received. A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime). - -This SDK supports accessing the Realtime API through the [WebSocket API](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket) or with [ws](https://github.com/websockets/ws). 
- -Basic text based example with `ws`: - -```ts -// requires `yarn add ws @types/ws` -import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; - -const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); - -// access the underlying `ws.WebSocket` instance -rt.socket.on('open', () => { - console.log('Connection opened!'); - rt.send({ - type: 'session.update', - session: { - modalities: ['text'], - model: 'gpt-4o-realtime-preview', - }, - }); - - rt.send({ - type: 'conversation.item.create', - item: { - type: 'message', - role: 'user', - content: [{ type: 'input_text', text: 'Say a couple paragraphs!' }], - }, - }); - - rt.send({ type: 'response.create' }); -}); - -rt.on('error', (err) => { - // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors - throw err; -}); - -rt.on('session.created', (event) => { - console.log('session created!', event.session); - console.log(); -}); - -rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); -rt.on('response.text.done', () => console.log()); - -rt.on('response.done', () => rt.close()); - -rt.socket.on('close', () => console.log('\nConnection closed!')); -``` - -To use the web API `WebSocket` implementation, replace `OpenAIRealtimeWS` with `OpenAIRealtimeWebSocket` and adjust any `rt.socket` access: - -```ts -import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; - -const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); -// ... -rt.socket.addEventListener('open', () => { - // ... -}); -``` - -A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/websocket.ts). 
- -### Realtime error handling - -When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. - -It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable. - -```ts -const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); -rt.on('error', (err) => { - // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors - throw err; -}); -``` - -### Request & Response types - -This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: - - -```ts -import OpenAI from 'openai'; - -const client = new OpenAI({ - apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted -}); - -async function main() { - const params: OpenAI.Chat.ChatCompletionCreateParams = { - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }; - const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); -} - -main(); -``` - -Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. - -> [!IMPORTANT] -> Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217). - -### Polling Helpers - -When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. 
The SDK includes -helper functions which will poll the status until it reaches a terminal state and then return the resulting object. -If an API method results in an action which could benefit from polling there will be a corresponding version of the -method ending in 'AndPoll'. - -For instance to create a Run and poll until it reaches a terminal state you can run: - -```ts -const run = await openai.beta.threads.runs.createAndPoll(thread.id, { - assistant_id: assistantId, -}); -``` - -More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/deep-dive/run-lifecycle) - -### Bulk Upload Helpers - -When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations. -For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. - -```ts -const fileList = [ - createReadStream('/home/data/example.pdf'), - ... -]; - -const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList}); -``` - -### Streaming Helpers - -The SDK also includes helpers to process streams and handle the incoming events. 
- -```ts -const run = openai.beta.threads.runs - .stream(thread.id, { - assistant_id: assistant.id, - }) - .on('textCreated', (text) => process.stdout.write('\nassistant > ')) - .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value)) - .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`)) - .on('toolCallDelta', (toolCallDelta, snapshot) => { - if (toolCallDelta.type === 'code_interpreter') { - if (toolCallDelta.code_interpreter.input) { - process.stdout.write(toolCallDelta.code_interpreter.input); - } - if (toolCallDelta.code_interpreter.outputs) { - process.stdout.write('\noutput >\n'); - toolCallDelta.code_interpreter.outputs.forEach((output) => { - if (output.type === 'logs') { - process.stdout.write(`\n${output.logs}\n`); - } - }); - } - } - }); -``` - -More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md) +If you need to cancel a stream, you can `break` from the loop or call `stream.controller.abort()`. -### Streaming responses +### Chat Completion streaming helpers -This library provides several conveniences for streaming chat completions, for example: +This library also provides several conveniences for streaming chat completions, for example: ```ts import OpenAI from 'openai'; @@ -292,98 +114,32 @@ async function main() { main(); ``` -Streaming with `openai.beta.chat.completions.stream({…})` exposes -[various helpers for your convenience](helpers.md#chat-events) including event handlers and promises. - -Alternatively, you can use `openai.chat.completions.create({ stream: true, … })` -which only returns an async iterable of the chunks in the stream and thus uses less memory -(it does not build up a final chat completion object for you). - -If you need to cancel a stream, you can `break` from a `for await` loop or call `stream.abort()`. - -### Automated function calls +See [helpers.md](helpers.md#chat-events) for more details. 
-We provide the `openai.beta.chat.completions.runTools({…})` -convenience helper for using function tool calls with the `/chat/completions` endpoint -which automatically call the JavaScript functions you provide -and sends their results back to the `/chat/completions` endpoint, -looping as long as the model requests tool calls. - -If you pass a `parse` function, it will automatically parse the `arguments` for you -and returns any parsing errors to the model to attempt auto-recovery. -Otherwise, the args will be passed to the function you provide as a string. +### Request & Response types -If you pass `tool_choice: {function: {name: …}}` instead of `auto`, -it returns immediately after calling that function (and only loops to auto-recover parsing errors). +This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: + ```ts import OpenAI from 'openai'; -const client = new OpenAI(); +const client = new OpenAI({ + apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +}); async function main() { - const runner = client.beta.chat.completions - .runTools({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'How is the weather this week?' }], - tools: [ - { - type: 'function', - function: { - function: getCurrentLocation, - parameters: { type: 'object', properties: {} }, - }, - }, - { - type: 'function', - function: { - function: getWeather, - parse: JSON.parse, // or use a validation library like zod for typesafe parsing. 
- parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - }, - }, - }, - ], - }) - .on('message', (message) => console.log(message)); - - const finalContent = await runner.finalContent(); - console.log(); - console.log('Final content:', finalContent); -} - -async function getCurrentLocation() { - return 'Boston'; // Simulate lookup -} - -async function getWeather(args: { location: string }) { - const { location } = args; - // … do lookup … - return { temperature, precipitation }; + const params: OpenAI.Chat.ChatCompletionCreateParams = { + messages: [{ role: 'user', content: 'Say this is a test' }], + model: 'gpt-4o', + }; + const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); } main(); - -// {role: "user", content: "How's the weather this week?"} -// {role: "assistant", tool_calls: [{type: "function", function: {name: "getCurrentLocation", arguments: "{}"}, id: "123"} -// {role: "tool", name: "getCurrentLocation", content: "Boston", tool_call_id: "123"} -// {role: "assistant", tool_calls: [{type: "function", function: {name: "getWeather", arguments: '{"location": "Boston"}'}, id: "1234"}]} -// {role: "tool", name: "getWeather", content: '{"temperature": "50degF", "preciptation": "high"}', tool_call_id: "1234"} -// {role: "assistant", content: "It's looking cold and rainy - you might want to wear a jacket!"} -// -// Final content: "It's looking cold and rainy - you might want to wear a jacket!" ``` -Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#chat-events). - -Note that `runFunctions` was previously available as well, but has been deprecated in favor of `runTools`. - -Read more about various examples such as with integrating with [zod](helpers.md#integrate-with-zod), -[next.js](helpers.md#integrate-with-nextjs), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). 
+Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. ## File uploads @@ -434,6 +190,7 @@ async function main() { .create({ model: 'gpt-4o', training_file: 'file-abc123' }) .catch(async (err) => { if (err instanceof OpenAI.APIError) { + console.log(err.request_id); console.log(err.status); // 400 console.log(err.name); // BadRequestError console.log(err.headers); // {server: 'nginx', ...} @@ -459,76 +216,6 @@ Error codes are as followed: | >=500 | `InternalServerError` | | N/A | `APIConnectionError` | -## Request IDs - -> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests) - -All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. - -```ts -const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); -console.log(completion._request_id) // req_123 -``` - -You can also access the Request ID using the `.withResponse()` method: - -```ts -const { data: stream, request_id } = await openai.chat.completions - .create({ - model: 'gpt-4', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }) - .withResponse(); -``` - -## Microsoft Azure OpenAI - -To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` -class instead of the `OpenAI` class. - -> [!IMPORTANT] -> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params -> won't always be correct. 
- -```ts -import { AzureOpenAI } from 'openai'; -import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; - -const credential = new DefaultAzureCredential(); -const scope = 'https://cognitiveservices.azure.com/.default'; -const azureADTokenProvider = getBearerTokenProvider(credential, scope); - -const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" }); - -const result = await openai.chat.completions.create({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say hello!' }], -}); - -console.log(result.choices[0]!.message?.content); -``` - -### Realtime API -This SDK provides real-time streaming capabilities for Azure OpenAI through the `OpenAIRealtimeWS` and `OpenAIRealtimeWebSocket` clients described previously. - -To utilize the real-time features, begin by creating a fully configured `AzureOpenAI` client and passing it into either `OpenAIRealtimeWS.azure` or `OpenAIRealtimeWebSocket.azure`. For example: - -```ts -const cred = new DefaultAzureCredential(); -const scope = 'https://cognitiveservices.azure.com/.default'; -const deploymentName = 'gpt-4o-realtime-preview-1001'; -const azureADTokenProvider = getBearerTokenProvider(cred, scope); -const client = new AzureOpenAI({ - azureADTokenProvider, - apiVersion: '2024-10-01-preview', - deployment: deploymentName, -}); -const rt = await OpenAIRealtimeWS.azure(client); -``` - -Once the instance has been created, you can then begin sending requests and receiving streaming responses in real time. - ### Retries Certain errors will be automatically retried 2 times by default, with a short exponential backoff. @@ -571,6 +258,29 @@ On timeout, an `APIConnectionTimeoutError` is thrown. Note that requests which time out will be [retried twice by default](#retries). 
+## Request IDs + +> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests) + +All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. + +```ts +const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); +console.log(completion._request_id) // req_123 +``` + +You can also access the Request ID using the `.withResponse()` method: + +```ts +const { data: stream, request_id } = await openai.chat.completions + .create({ + model: 'gpt-4', + messages: [{ role: 'user', content: 'Say this is a test' }], + stream: true, + }) + .withResponse(); +``` + ## Auto-pagination List methods in the OpenAI API are paginated. @@ -602,6 +312,55 @@ while (page.hasNextPage()) { } ``` +## Realtime API Beta + +The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. + +```ts +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; + +const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + +rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); +``` + +For more information see [realtime.md](realtime.md). + +## Microsoft Azure OpenAI + +To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` +class instead of the `OpenAI` class. + +> [!IMPORTANT] +> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params +> won't always be correct. 
+ +```ts +import { AzureOpenAI } from 'openai'; +import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; + +const credential = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const azureADTokenProvider = getBearerTokenProvider(credential, scope); + +const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" }); + +const result = await openai.chat.completions.create({ + model: 'gpt-4o', + messages: [{ role: 'user', content: 'Say hello!' }], +}); + +console.log(result.choices[0]!.message?.content); +``` + +For more information on support for the Azure API, see [azure.md](azure.md). + +## Automated function calls + +We provide the `openai.beta.chat.completions.runTools({…})` convenience helper for using function tool calls with the `/chat/completions` endpoint which automatically call the JavaScript functions you provide and sends their results back to the `/chat/completions` endpoint, looping as long as the model requests tool calls. + +For more information see [helpers.md](helpers.md#automated-function-calls). + ## Advanced Usage ### Accessing raw Response data (e.g., headers) diff --git a/azure.md b/azure.md new file mode 100644 index 000000000..df06c2985 --- /dev/null +++ b/azure.md @@ -0,0 +1,49 @@ +# Microsoft Azure OpenAI + +To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI` +class instead of the `OpenAI` class. + +> [!IMPORTANT] +> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params +> won't always be correct. 
+ +```ts +import { AzureOpenAI } from 'openai'; +import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity'; + +const credential = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const azureADTokenProvider = getBearerTokenProvider(credential, scope); + +const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" }); + +const result = await openai.chat.completions.create({ + model: 'gpt-4o', + messages: [{ role: 'user', content: 'Say hello!' }], +}); + +console.log(result.choices[0]!.message?.content); +``` + +For more information on support for the Azure API, see [azure.md](azure.md). + +## Realtime API + +This SDK provides real-time streaming capabilities for Azure OpenAI through the `OpenAIRealtimeWS` and `OpenAIRealtimeWebSocket` clients described previously. + +To utilize the real-time features, begin by creating a fully configured `AzureOpenAI` client and passing it into either `OpenAIRealtimeWS.azure` or `OpenAIRealtimeWebSocket.azure`. For example: + +```ts +const cred = new DefaultAzureCredential(); +const scope = 'https://cognitiveservices.azure.com/.default'; +const deploymentName = 'gpt-4o-realtime-preview-1001'; +const azureADTokenProvider = getBearerTokenProvider(cred, scope); +const client = new AzureOpenAI({ + azureADTokenProvider, + apiVersion: '2024-10-01-preview', + deployment: deploymentName, +}); +const rt = await OpenAIRealtimeWS.azure(client); +``` + +Once the instance has been created, you can then begin sending requests and receiving streaming responses in real time. 
diff --git a/helpers.md b/helpers.md index 16bc1f277..41b352e5e 100644 --- a/helpers.md +++ b/helpers.md @@ -142,9 +142,7 @@ More information can be found in the documentation: [Assistant Streaming](https: ```ts const run = openai.beta.threads.runs - .stream(thread.id, { - assistant_id: assistant.id, - }) + .stream(thread.id, { assistant_id: assistant.id }) .on('textCreated', (text) => process.stdout.write('\nassistant > ')) .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value)) .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`)) @@ -304,47 +302,87 @@ If you need to cancel a stream, you can `break` from a `for await` loop or call See an example of streaming helpers in action in [`examples/stream.ts`](examples/stream.ts). -### Automated Function Calls +### Automated function calls -```ts -openai.chat.completions.runTools({ stream: false, … }, options?): ChatCompletionRunner -openai.chat.completions.runTools({ stream: true, … }, options?): ChatCompletionStreamingRunner -``` +We provide the `openai.beta.chat.completions.runTools({…})` +convenience helper for using function tool calls with the `/chat/completions` endpoint +which automatically call the JavaScript functions you provide +and sends their results back to the `/chat/completions` endpoint, +looping as long as the model requests tool calls. -`openai.chat.completions.runTools()` returns a Runner -for automating function calls with chat completions. -The runner automatically calls the JavaScript functions you provide and sends their results back -to the API, looping as long as the model requests function calls. +If you pass a `parse` function, it will automatically parse the `arguments` for you +and returns any parsing errors to the model to attempt auto-recovery. +Otherwise, the args will be passed to the function you provide as a string. 
-If you pass a `parse` function, it will automatically parse the `arguments` for you and returns any parsing -errors to the model to attempt auto-recovery. Otherwise, the args will be passed to the function you provide -as a string. +If you pass `tool_choice: {function: {name: …}}` instead of `auto`, +it returns immediately after calling that function (and only loops to auto-recover parsing errors). ```ts -client.chat.completions.runTools({ - model: 'gpt-3.5-turbo', - messages: [{ role: 'user', content: 'How is the weather this week?' }], - tools: [ - { - type: 'function', - function: { - function: getWeather as (args: { location: string; time: Date }) => any, - parse: parseFunction as (args: strings) => { location: string; time: Date }, - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - time: { type: 'string', format: 'date-time' }, +import OpenAI from 'openai'; + +const client = new OpenAI(); + +async function main() { + const runner = client.beta.chat.completions + .runTools({ + model: 'gpt-4o', + messages: [{ role: 'user', content: 'How is the weather this week?' }], + tools: [ + { + type: 'function', + function: { + function: getCurrentLocation, + parameters: { type: 'object', properties: {} }, }, }, - }, - }, - ], -}); + { + type: 'function', + function: { + function: getWeather, + parse: JSON.parse, // or use a validation library like zod for typesafe parsing. 
+ parameters: { + type: 'object', + properties: { + location: { type: 'string' }, + }, + }, + }, + }, + ], + }) + .on('message', (message) => console.log(message)); + + const finalContent = await runner.finalContent(); + console.log(); + console.log('Final content:', finalContent); +} + +async function getCurrentLocation() { + return 'Boston'; // Simulate lookup +} + +async function getWeather(args: { location: string }) { + const { location } = args; + // … do lookup … + return { temperature, precipitation }; +} + +main(); + +// {role: "user", content: "How's the weather this week?"} +// {role: "assistant", tool_calls: [{type: "function", function: {name: "getCurrentLocation", arguments: "{}"}, id: "123"} +// {role: "tool", name: "getCurrentLocation", content: "Boston", tool_call_id: "123"} +// {role: "assistant", tool_calls: [{type: "function", function: {name: "getWeather", arguments: '{"location": "Boston"}'}, id: "1234"}]} +// {role: "tool", name: "getWeather", content: '{"temperature": "50degF", "preciptation": "high"}', tool_call_id: "1234"} +// {role: "assistant", content: "It's looking cold and rainy - you might want to wear a jacket!"} +// +// Final content: "It's looking cold and rainy - you might want to wear a jacket!" ``` -If you pass `function_call: {name: …}` instead of `auto`, it returns immediately after calling that -function (and only loops to auto-recover parsing errors). +Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#chat-events). + +Read more about various examples such as with integrating with [zod](#integrate-with-zod), +[next.js](#integrate-with-nextjs), and [proxying a stream to the browser](#proxy-streaming-to-a-browser). By default, we run the loop up to 10 chat completions from the API. You can change this behavior by adjusting `maxChatCompletions` in the request options object. Note that `max_tokens` is the limit per @@ -662,3 +700,17 @@ client.beta.vectorStores.files.createAndPoll((...) 
client.beta.vectorStores.fileBatches.createAndPoll((...) client.beta.vectorStores.fileBatches.uploadAndPoll((...) ``` + +# Bulk Upload Helpers + +When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations. +For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. + +```ts +const fileList = [ + createReadStream('/home/data/example.pdf'), + ... +]; + +const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList}); +``` diff --git a/realtime.md b/realtime.md new file mode 100644 index 000000000..2fcd17e9e --- /dev/null +++ b/realtime.md @@ -0,0 +1,87 @@ +## Realtime API beta + +The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. + +The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. Server events confirm when audio responses have completed, or when a text response from the model has been received. A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime). + +This SDK supports accessing the Realtime API through the [WebSocket API](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket) or with [ws](https://github.com/websockets/ws). 
+ +Basic text based example with `ws`: + +```ts +// requires `yarn add ws @types/ws` +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; + +const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + +// access the underlying `ws.WebSocket` instance +rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' }], + }, + }); + + rt.send({ type: 'response.create' }); +}); + +rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; +}); + +rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); +}); + +rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); +rt.on('response.text.done', () => console.log()); + +rt.on('response.done', () => rt.close()); + +rt.socket.on('close', () => console.log('\nConnection closed!')); +``` + +To use the web API `WebSocket` implementation, replace `OpenAIRealtimeWS` with `OpenAIRealtimeWebSocket` and adjust any `rt.socket` access: + +```ts +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; + +const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); +// ... +rt.socket.addEventListener('open', () => { + // ... +}); +``` + +A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/websocket.ts). 
+ +### Realtime error handling + +When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown. + +It is **highly recommended** that you register an `error` event listener and handle errors approriately as typically the underlying connection is still usable. + +```ts +const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); +rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue procesing events regardless of any errors + throw err; +}); +``` + From 23c194b4b927e50d0f5a78272e9ac50b181c53eb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 6 Feb 2025 15:16:31 +0000 Subject: [PATCH 407/533] feat(pagination): avoid fetching when has_more: false (#1305) --- .stats.yml | 2 +- src/pagination.ts | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 8a5d2c06b..d59a86d22 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7c699d4503077d06a4a44f52c0c1f902d19a87c766b8be75b97c8dfd484ad4aa.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml diff --git a/src/pagination.ts b/src/pagination.ts index 63644e333..ad90a3a74 100644 --- a/src/pagination.ts +++ b/src/pagination.ts @@ -43,6 +43,8 @@ export class Page extends AbstractPage implements PageResponse export interface CursorPageResponse { data: Array; + + has_more: boolean; } export interface 
CursorPageParams { @@ -57,6 +59,8 @@ export class CursorPage { data: Array; + has_more: boolean; + constructor( client: APIClient, response: Response, @@ -66,12 +70,21 @@ export class CursorPage super(client, response, body, options); this.data = body.data || []; + this.has_more = body.has_more || false; } getPaginatedItems(): Item[] { return this.data ?? []; } + override hasNextPage() { + if (this.has_more === false) { + return false; + } + + return super.hasNextPage(); + } + // @deprecated Please use `nextPageInfo()` instead nextPageParams(): Partial | null { const info = this.nextPageInfo(); From 2d071dfd9e507e3a37177d1f96a5438ba9ac1268 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 10 Feb 2025 12:12:44 +0000 Subject: [PATCH 408/533] chore(internal): remove segfault-handler dependency --- ecosystem-tests/cli.ts | 4 ---- package.json | 1 - yarn.lock | 25 ------------------------- 3 files changed, 30 deletions(-) diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index 00120e5f9..4803b47c2 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -4,10 +4,6 @@ import yargs from 'yargs'; import assert from 'assert'; import path from 'path'; -// @ts-ignore -var SegfaultHandler = require('segfault-handler'); -SegfaultHandler.registerHandler('crash.log'); - const TAR_NAME = 'openai.tgz'; const PACK_FOLDER = '.pack'; const PACK_FILE = `${PACK_FOLDER}/${TAR_NAME}`; diff --git a/package.json b/package.json index bd507e9f8..df2dcd2bc 100644 --- a/package.json +++ b/package.json @@ -47,7 +47,6 @@ "jest": "^29.4.0", "prettier": "^3.0.0", "prettier-2": "npm:prettier@^2", - "segfault-handler": "^1.3.0", "ts-jest": "^29.1.0", "ts-node": "^10.5.0", "tsc-multi": "^1.1.0", diff --git a/yarn.lock b/yarn.lock index 0a4307f70..ad5fb7630 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1176,13 +1176,6 @@ big-integer@^1.6.44: resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85" integrity 
sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg== -bindings@^1.2.1: - version "1.5.0" - resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" - integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ== - dependencies: - file-uri-to-path "1.0.0" - bplist-parser@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/bplist-parser/-/bplist-parser-0.2.0.tgz#43a9d183e5bf9d545200ceac3e712f79ebbe8d0e" @@ -1760,11 +1753,6 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -file-uri-to-path@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" - integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== - fill-range@^7.1.1: version "7.1.1" resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" @@ -2706,11 +2694,6 @@ ms@^2.0.0, ms@^2.1.3: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== -nan@^2.14.0: - version "2.22.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.22.0.tgz#31bc433fc33213c97bad36404bb68063de604de3" - integrity sha512-nbajikzWTMwsW+eSsNm3QwlOs7het9gGJU5dDZzRTQGk03vyBOauxgI4VakDzE0PtsGTmXPsXTbbjVhRwR5mpw== - natural-compare@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" @@ -3061,14 +3044,6 @@ safe-buffer@~5.2.0: resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity 
sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== -segfault-handler@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/segfault-handler/-/segfault-handler-1.3.0.tgz#054bc847832fa14f218ba6a79e42877501c8870e" - integrity sha512-p7kVHo+4uoYkr0jmIiTBthwV5L2qmWtben/KDunDZ834mbos+tY+iO0//HpAJpOFSQZZ+wxKWuRo4DxV02B7Lg== - dependencies: - bindings "^1.2.1" - nan "^2.14.0" - semver@^6.3.0, semver@^6.3.1: version "6.3.1" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" From 85ff876a75147490e60c70c2f36e964513f1086a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 20:06:34 +0000 Subject: [PATCH 409/533] fix: correctly decode multi-byte characters over multiple chunks (#1316) --- src/internal/decoders/line.ts | 107 ++++++++++++++++++++++------------ src/streaming.ts | 6 +- tests/streaming.test.ts | 53 ++++++++++++++++- 3 files changed, 126 insertions(+), 40 deletions(-) diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts index 34e41d1dc..66f62c057 100644 --- a/src/internal/decoders/line.ts +++ b/src/internal/decoders/line.ts @@ -13,52 +13,58 @@ export class LineDecoder { static NEWLINE_CHARS = new Set(['\n', '\r']); static NEWLINE_REGEXP = /\r\n|[\n\r]/g; - buffer: string[]; - trailingCR: boolean; + buffer: Uint8Array; + #carriageReturnIndex: number | null; textDecoder: any; // TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types. 
constructor() { - this.buffer = []; - this.trailingCR = false; + this.buffer = new Uint8Array(); + this.#carriageReturnIndex = null; } decode(chunk: Bytes): string[] { - let text = this.decodeText(chunk); - - if (this.trailingCR) { - text = '\r' + text; - this.trailingCR = false; - } - if (text.endsWith('\r')) { - this.trailingCR = true; - text = text.slice(0, -1); - } - - if (!text) { + if (chunk == null) { return []; } - const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || ''); - let lines = text.split(LineDecoder.NEWLINE_REGEXP); + const binaryChunk = + chunk instanceof ArrayBuffer ? new Uint8Array(chunk) + : typeof chunk === 'string' ? new TextEncoder().encode(chunk) + : chunk; + + let newData = new Uint8Array(this.buffer.length + binaryChunk.length); + newData.set(this.buffer); + newData.set(binaryChunk, this.buffer.length); + this.buffer = newData; + + const lines: string[] = []; + let patternIndex; + while ((patternIndex = findNewlineIndex(this.buffer, this.#carriageReturnIndex)) != null) { + if (patternIndex.carriage && this.#carriageReturnIndex == null) { + // skip until we either get a corresponding `\n`, a new `\r` or nothing + this.#carriageReturnIndex = patternIndex.index; + continue; + } - // if there is a trailing new line then the last entry will be an empty - // string which we don't care about - if (trailingNewline) { - lines.pop(); - } + // we got double \r or \rtext\n + if ( + this.#carriageReturnIndex != null && + (patternIndex.index !== this.#carriageReturnIndex + 1 || patternIndex.carriage) + ) { + lines.push(this.decodeText(this.buffer.slice(0, this.#carriageReturnIndex - 1))); + this.buffer = this.buffer.slice(this.#carriageReturnIndex); + this.#carriageReturnIndex = null; + continue; + } - if (lines.length === 1 && !trailingNewline) { - this.buffer.push(lines[0]!); - return []; - } + const endIndex = + this.#carriageReturnIndex !== null ? 
patternIndex.preceding - 1 : patternIndex.preceding; - if (this.buffer.length > 0) { - lines = [this.buffer.join('') + lines[0], ...lines.slice(1)]; - this.buffer = []; - } + const line = this.decodeText(this.buffer.slice(0, endIndex)); + lines.push(line); - if (!trailingNewline) { - this.buffer = [lines.pop() || '']; + this.buffer = this.buffer.slice(patternIndex.index); + this.#carriageReturnIndex = null; } return lines; @@ -102,13 +108,38 @@ export class LineDecoder { } flush(): string[] { - if (!this.buffer.length && !this.trailingCR) { + if (!this.buffer.length) { return []; } + return this.decode('\n'); + } +} - const lines = [this.buffer.join('')]; - this.buffer = []; - this.trailingCR = false; - return lines; +/** + * This function searches the buffer for the end patterns, (\r or \n) + * and returns an object with the index preceding the matched newline and the + * index after the newline char. `null` is returned if no new line is found. + * + * ```ts + * findNewLineIndex('abc\ndef') -> { preceding: 2, index: 3 } + * ``` + */ +function findNewlineIndex( + buffer: Uint8Array, + startIndex: number | null, +): { preceding: number; index: number; carriage: boolean } | null { + const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = startIndex ?? 
0; i < buffer.length; i++) { + if (buffer[i] === newline) { + return { preceding: i, index: i + 1, carriage: false }; + } + + if (buffer[i] === carriage) { + return { preceding: i, index: i + 1, carriage: true }; + } } + + return null; } diff --git a/src/streaming.ts b/src/streaming.ts index 6a57a50a0..1d1ae344b 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -346,13 +346,17 @@ class SSEDecoder { } /** This is an internal helper function that's just used for testing */ -export function _decodeChunks(chunks: string[]): string[] { +export function _decodeChunks(chunks: string[], { flush }: { flush: boolean } = { flush: false }): string[] { const decoder = new LineDecoder(); const lines: string[] = []; for (const chunk of chunks) { lines.push(...decoder.decode(chunk)); } + if (flush) { + lines.push(...decoder.flush()); + } + return lines; } diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts index 6fe9a5781..8e5d0ca31 100644 --- a/tests/streaming.test.ts +++ b/tests/streaming.test.ts @@ -2,6 +2,7 @@ import { Response } from 'node-fetch'; import { PassThrough } from 'stream'; import assert from 'assert'; import { _iterSSEMessages, _decodeChunks as decodeChunks } from 'openai/streaming'; +import { LineDecoder } from 'openai/internal/decoders/line'; describe('line decoder', () => { test('basic', () => { @@ -10,8 +11,8 @@ describe('line decoder', () => { }); test('basic with \\r', () => { - // baz is not included because the line hasn't ended yet expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); + expect(decodeChunks(['foo', ' bar\r\nbaz'], { flush: true })).toEqual(['foo bar', 'baz']); }); test('trailing new lines', () => { @@ -29,6 +30,56 @@ describe('line decoder', () => { test('escaped new lines with \\r', () => { expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); }); + + test('\\r & \\n split across multiple chunks', () => { + expect(decodeChunks(['foo\r', '\n', 'bar'], { flush: true })).toEqual(['foo', 
'bar']); + }); + + test('single \\r', () => { + expect(decodeChunks(['foo\r', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('double \\r', () => { + expect(decodeChunks(['foo\r', 'bar\r'], { flush: true })).toEqual(['foo', 'bar']); + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + // implementation detail that we don't yield the single \r line until a new \r or \n is encountered + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: false })).toEqual(['foo']); + }); + + test('double \\r then \\r\\n', () => { + expect(decodeChunks(['foo\r', '\r', '\r', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + }); + + test('double newline', () => { + expect(decodeChunks(['foo\n\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('multi-byte characters across chunks', () => { + const decoder = new LineDecoder(); + + // bytes taken from the string 'известни' and arbitrarily split + // so that some multi-byte characters span multiple chunks + expect(decoder.decode(new Uint8Array([0xd0]))).toHaveLength(0); + expect(decoder.decode(new Uint8Array([0xb8, 0xd0, 0xb7, 0xd0]))).toHaveLength(0); + expect( + decoder.decode(new Uint8Array([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8])), + ).toHaveLength(0); + + const decoded = decoder.decode(new Uint8Array([0xa])); + expect(decoded).toEqual(['известни']); + }); + + test('flushing trailing newlines', () => { + expect(decodeChunks(['foo\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('flushing empty buffer', () => { + 
expect(decodeChunks([], { flush: true })).toEqual([]); + }); }); describe('streaming decoding', () => { From 5e5a38a3f5bd45e74eb624fe85664294247bf580 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Feb 2025 11:19:35 +0000 Subject: [PATCH 410/533] fix(assistants): handle `thread.run.incomplete` event --- src/lib/AssistantStream.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index caf68e7dd..9b6cc20c5 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -370,6 +370,7 @@ export class AssistantStream case 'thread.run.in_progress': case 'thread.run.requires_action': case 'thread.run.completed': + case 'thread.run.incomplete': case 'thread.run.failed': case 'thread.run.cancelling': case 'thread.run.cancelled': @@ -400,6 +401,8 @@ export class AssistantStream throw new Error( 'Encountered an error event in event processing - errors should be processed earlier', ); + default: + assertNever(event); } } @@ -772,3 +775,5 @@ export class AssistantStream return await this._createToolAssistantStream(runs, threadId, runId, params, options); } } + +function assertNever(_x: never) {} From 0ea723831b52ed22cadfc997ddb45a758e2247db Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 12 Feb 2025 05:07:11 +0000 Subject: [PATCH 411/533] release: 4.84.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 25 +++++++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 29 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6eb0f130e..063dfb8fd 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.83.0" + ".": "4.84.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index f61def5e4..d18ddf815 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Changelog +## 4.84.0 
(2025-02-12) + +Full Changelog: [v4.83.0...v4.84.0](https://github.com/openai/openai-node/compare/v4.83.0...v4.84.0) + +### Features + +* **pagination:** avoid fetching when has_more: false ([#1305](https://github.com/openai/openai-node/issues/1305)) ([b6944c6](https://github.com/openai/openai-node/commit/b6944c634b53c9084f2ccf777c2491e89b2cc7af)) + + +### Bug Fixes + +* **api:** add missing reasoning effort + model enums ([#1302](https://github.com/openai/openai-node/issues/1302)) ([14c55c3](https://github.com/openai/openai-node/commit/14c55c312e31f1ed46d02f39a99049f785504a53)) +* **assistants:** handle `thread.run.incomplete` event ([7032cc4](https://github.com/openai/openai-node/commit/7032cc40b8aa0a58459cf114bceb8028a8517400)) +* correctly decode multi-byte characters over multiple chunks ([#1316](https://github.com/openai/openai-node/issues/1316)) ([dd776c4](https://github.com/openai/openai-node/commit/dd776c4867401f527f699bd4b9e567890256e849)) + + +### Chores + +* **internal:** remove segfault-handler dependency ([3521ca3](https://github.com/openai/openai-node/commit/3521ca34e7f5bd51542084e27c084a5d7cc5448b)) + + +### Documentation + +* **readme:** cleanup into multiple files ([da94424](https://github.com/openai/openai-node/commit/da944242e542e9e5e51cb11853c621fc6825ac02)) + ## 4.83.0 (2025-02-05) Full Changelog: [v4.82.0...v4.83.0](https://github.com/openai/openai-node/compare/v4.82.0...v4.83.0) diff --git a/jsr.json b/jsr.json index 6fa05e624..47c478074 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.83.0", + "version": "4.84.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index df2dcd2bc..96e9b048f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.83.0", + "version": "4.84.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff 
--git a/src/version.ts b/src/version.ts index 13c764d7d..b67556e78 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.83.0'; // x-release-please-version +export const VERSION = '4.84.0'; // x-release-please-version From 0e1981a128b4db5db657f22a54b711420ebbdb32 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 12 Feb 2025 16:01:16 +0000 Subject: [PATCH 412/533] fix(realtime): correct websocket type var constraint (#1321) --- src/beta/realtime/websocket.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts index e8143fdbf..b10a2519d 100644 --- a/src/beta/realtime/websocket.ts +++ b/src/beta/realtime/websocket.ts @@ -11,7 +11,7 @@ interface MessageEvent { type _WebSocket = typeof globalThis extends ( { - WebSocket: infer ws; + WebSocket: infer ws extends abstract new (...args: any) => any; } ) ? // @ts-ignore From c91ebef762fd55a553e15d7e4a1908243ea3e007 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 05:07:08 +0000 Subject: [PATCH 413/533] release: 4.84.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 063dfb8fd..023314f41 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.84.0" + ".": "4.84.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d18ddf815..444430307 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.84.1 (2025-02-13) + +Full Changelog: [v4.84.0...v4.84.1](https://github.com/openai/openai-node/compare/v4.84.0...v4.84.1) + +### Bug Fixes + +* **realtime:** correct websocket type var constraint ([#1321](https://github.com/openai/openai-node/issues/1321)) 
([afb17ea](https://github.com/openai/openai-node/commit/afb17ea6497b860ebbe5d8e68e4a97681dd307ff)) + ## 4.84.0 (2025-02-12) Full Changelog: [v4.83.0...v4.84.0](https://github.com/openai/openai-node/compare/v4.83.0...v4.84.0) diff --git a/jsr.json b/jsr.json index 47c478074..3148d6fca 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.84.0", + "version": "4.84.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 96e9b048f..4686e3a97 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.84.0", + "version": "4.84.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index b67556e78..767424b0e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.84.0'; // x-release-please-version +export const VERSION = '4.84.1'; // x-release-please-version From 6e9444c6c77a93ff4ce06bd5b27a9c236ba6f307 Mon Sep 17 00:00:00 2001 From: Jamon Holmgren Date: Thu, 13 Feb 2025 05:27:35 -0800 Subject: [PATCH 414/533] fix(realtime): call .toString() on WebSocket url (#1324) The [WebSocket spec at WHATWG](https://websockets.spec.whatwg.org/#ref-for-dom-websocket-websocket%E2%91%A0) indicates that the `url` parameter of the WebSocket constructor is a string. Some implementations (like Chrome) will accept a URL object, but calling .toString() should work for all cases. Fixes #1323. 
--- src/beta/realtime/websocket.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts index b10a2519d..e8900e809 100644 --- a/src/beta/realtime/websocket.ts +++ b/src/beta/realtime/websocket.ts @@ -53,7 +53,7 @@ export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { props.onURL?.(this.url); // @ts-ignore - this.socket = new WebSocket(this.url, [ + this.socket = new WebSocket(this.url.toString(), [ 'realtime', ...(isAzure(client) ? [] : [`openai-insecure-api-key.${client.apiKey}`]), 'openai-beta.realtime-v1', From be1ca6b9a6732214ac21ca375b5b0a9b7f492fd6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 19:41:49 +0000 Subject: [PATCH 415/533] feat(api): add support for storing chat completions (#1327) --- .stats.yml | 4 +- api.md | 72 ++++---- src/index.ts | 27 ++- src/lib/ChatCompletionStream.ts | 2 +- src/resources/chat/chat.ts | 19 +- .../chat/{ => completions}/completions.ts | 170 ++++++++++++++++-- src/resources/chat/completions/index.ts | 49 +++++ src/resources/chat/completions/messages.ts | 52 ++++++ src/resources/chat/index.ts | 10 +- src/resources/completions.ts | 4 +- src/resources/moderations.ts | 4 +- tests/api-resources/chat/completions.test.ts | 65 ------- .../chat/completions/completions.test.ts | 144 +++++++++++++++ .../chat/completions/messages.test.ts | 40 +++++ 14 files changed, 534 insertions(+), 128 deletions(-) rename src/resources/chat/{ => completions}/completions.ts (88%) create mode 100644 src/resources/chat/completions/index.ts create mode 100644 src/resources/chat/completions/messages.ts delete mode 100644 tests/api-resources/chat/completions.test.ts create mode 100644 tests/api-resources/chat/completions/completions.test.ts create mode 100644 tests/api-resources/chat/completions/messages.test.ts diff --git a/.stats.yml b/.stats.yml index d59a86d22..658877d3b 100644 
--- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml +configured_endpoints: 74 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4aa6ee65ba9efc789e05e6a5ef0883b2cadf06def8efd863dbf75e9e233067e1.yml diff --git a/api.md b/api.md index 01854a8e0..63f239628 100644 --- a/api.md +++ b/api.md @@ -32,39 +32,51 @@ Types: Types: -- ChatCompletion -- ChatCompletionAssistantMessageParam -- ChatCompletionAudio -- ChatCompletionAudioParam -- ChatCompletionChunk -- ChatCompletionContentPart -- ChatCompletionContentPartImage -- ChatCompletionContentPartInputAudio -- ChatCompletionContentPartRefusal -- ChatCompletionContentPartText -- ChatCompletionDeveloperMessageParam -- ChatCompletionFunctionCallOption -- ChatCompletionFunctionMessageParam -- ChatCompletionMessage -- ChatCompletionMessageParam -- ChatCompletionMessageToolCall -- ChatCompletionModality -- ChatCompletionNamedToolChoice -- ChatCompletionPredictionContent -- ChatCompletionReasoningEffort -- ChatCompletionRole -- ChatCompletionStreamOptions -- ChatCompletionSystemMessageParam -- ChatCompletionTokenLogprob -- ChatCompletionTool -- ChatCompletionToolChoiceOption -- ChatCompletionToolMessageParam -- ChatCompletionUserMessageParam -- CreateChatCompletionRequestMessage +- ChatCompletion +- ChatCompletionAssistantMessageParam +- ChatCompletionAudio +- ChatCompletionAudioParam +- ChatCompletionChunk +- ChatCompletionContentPart +- ChatCompletionContentPartImage +- ChatCompletionContentPartInputAudio +- ChatCompletionContentPartRefusal +- ChatCompletionContentPartText +- ChatCompletionDeleted +- ChatCompletionDeveloperMessageParam +- ChatCompletionFunctionCallOption +- ChatCompletionFunctionMessageParam +- ChatCompletionMessage +- ChatCompletionMessageParam +- ChatCompletionMessageToolCall +- ChatCompletionModality 
+- ChatCompletionNamedToolChoice +- ChatCompletionPredictionContent +- ChatCompletionReasoningEffort +- ChatCompletionRole +- ChatCompletionStoreMessage +- ChatCompletionStreamOptions +- ChatCompletionSystemMessageParam +- ChatCompletionTokenLogprob +- ChatCompletionTool +- ChatCompletionToolChoiceOption +- ChatCompletionToolMessageParam +- ChatCompletionUserMessageParam +- CreateChatCompletionRequestMessage Methods: -- client.chat.completions.create({ ...params }) -> ChatCompletion +- client.chat.completions.create({ ...params }) -> ChatCompletion +- client.chat.completions.retrieve(completionId) -> ChatCompletion +- client.chat.completions.update(completionId, { ...params }) -> ChatCompletion +- client.chat.completions.list({ ...params }) -> ChatCompletionsPage +- client.chat.completions.del(completionId) -> ChatCompletionDeleted + +### Messages + +Methods: + +- client.chat.completions.messages.list(completionId, { ...params }) -> ChatCompletionStoreMessagesPage # Embeddings diff --git a/src/index.ts b/src/index.ts index f4e940af8..debefce8c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -66,6 +66,13 @@ import { import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; import { Chat, ChatModel } from './resources/chat/chat'; +import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { + Upload, + UploadCompleteParams, + UploadCreateParams, + Uploads as UploadsAPIUploads, +} from './resources/uploads/uploads'; import { ChatCompletion, ChatCompletionAssistantMessageParam, @@ -80,9 +87,11 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + ChatCompletionDeleted, ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, + ChatCompletionListParams, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, @@ -91,21 +100,17 @@ import { 
ChatCompletionPredictionContent, ChatCompletionReasoningEffort, ChatCompletionRole, + ChatCompletionStoreMessage, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, ChatCompletionTool, ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, + ChatCompletionUpdateParams, ChatCompletionUserMessageParam, -} from './resources/chat/completions'; -import { FineTuning } from './resources/fine-tuning/fine-tuning'; -import { - Upload, - UploadCompleteParams, - UploadCreateParams, - Uploads as UploadsAPIUploads, -} from './resources/uploads/uploads'; + ChatCompletionsPage, +} from './resources/chat/completions/completions'; export interface ClientOptions { /** @@ -310,6 +315,7 @@ export class OpenAI extends Core.APIClient { OpenAI.Completions = Completions; OpenAI.Chat = Chat; +OpenAI.ChatCompletionsPage = ChatCompletionsPage; OpenAI.Embeddings = Embeddings; OpenAI.Files = Files; OpenAI.FileObjectsPage = FileObjectsPage; @@ -355,6 +361,7 @@ export declare namespace OpenAI { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, @@ -366,6 +373,7 @@ export declare namespace OpenAI { type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, type 
ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, @@ -373,9 +381,12 @@ export declare namespace OpenAI { type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, }; export { diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index 6c846f70b..35648c27b 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -13,7 +13,7 @@ import { type ChatCompletionCreateParamsStreaming, type ChatCompletionCreateParamsBase, type ChatCompletionRole, -} from '../resources/chat/completions'; +} from '../resources/chat/completions/completions'; import { AbstractChatCompletionRunner, type AbstractChatCompletionRunnerEvents, diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index d4a18929c..5bceec45a 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; -import * as CompletionsAPI from './completions'; +import * as CompletionsAPI from './completions/completions'; import { ChatCompletion, ChatCompletionAssistantMessageParam, @@ -16,9 +16,11 @@ import { ChatCompletionCreateParams, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming, + ChatCompletionDeleted, ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, + ChatCompletionListParams, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, @@ -27,19 +29,24 @@ import { ChatCompletionPredictionContent, ChatCompletionReasoningEffort, ChatCompletionRole, + ChatCompletionStoreMessage, ChatCompletionStreamOptions, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, ChatCompletionTool, ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, + ChatCompletionUpdateParams, ChatCompletionUserMessageParam, + ChatCompletionsPage, CompletionCreateParams, CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming, + CompletionListParams, + CompletionUpdateParams, Completions, CreateChatCompletionRequestMessage, -} from './completions'; +} from './completions/completions'; export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); @@ -87,6 +94,7 @@ export type ChatModel = | 'gpt-3.5-turbo-16k-0613'; Chat.Completions = Completions; +Chat.ChatCompletionsPage = ChatCompletionsPage; export declare namespace Chat { export { type ChatModel as ChatModel }; @@ -103,6 +111,7 @@ export declare namespace Chat { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, type ChatCompletionDeveloperMessageParam as 
ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, @@ -114,6 +123,7 @@ export declare namespace Chat { type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, @@ -122,11 +132,16 @@ export declare namespace Chat { type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type CompletionUpdateParams as CompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, + type CompletionListParams as CompletionListParams, }; } diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions/completions.ts similarity index 88% rename from src/resources/chat/completions.ts rename to src/resources/chat/completions/completions.ts index 2586845c3..3af4a3a1d 100644 --- a/src/resources/chat/completions.ts +++ 
b/src/resources/chat/completions/completions.ts @@ -1,15 +1,21 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../resource'; -import { APIPromise } from '../../core'; -import * as Core from '../../core'; -import * as ChatCompletionsAPI from './completions'; -import * as CompletionsAPI from '../completions'; -import * as Shared from '../shared'; -import * as ChatAPI from './chat'; -import { Stream } from '../../streaming'; +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import { APIPromise } from '../../../core'; +import * as Core from '../../../core'; +import * as CompletionsCompletionsAPI from './completions'; +import * as CompletionsAPI from '../../completions'; +import * as Shared from '../../shared'; +import * as ChatAPI from '../chat'; +import * as MessagesAPI from './messages'; +import { MessageListParams, Messages } from './messages'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { Stream } from '../../../streaming'; export class Completions extends APIResource { + messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client); + /** * Creates a model response for the given chat conversation. Learn more in the * [text generation](https://platform.openai.com/docs/guides/text-generation), @@ -42,8 +48,60 @@ export class Completions extends APIResource { | APIPromise | APIPromise>; } + + /** + * Get a stored chat completion. Only chat completions that have been created with + * the `store` parameter set to `true` will be returned. + */ + retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/chat/completions/${completionId}`, options); + } + + /** + * Modify a stored chat completion. Only chat completions that have been created + * with the `store` parameter set to `true` can be modified. 
Currently, the only + * supported modification is to update the `metadata` field. + */ + update( + completionId: string, + body: ChatCompletionUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/chat/completions/${completionId}`, { body, ...options }); + } + + /** + * List stored chat completions. Only chat completions that have been stored with + * the `store` parameter set to `true` will be returned. + */ + list( + query?: ChatCompletionListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list(options?: Core.RequestOptions): Core.PagePromise; + list( + query: ChatCompletionListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.getAPIList('/chat/completions', ChatCompletionsPage, { query, ...options }); + } + + /** + * Delete a stored chat completion. Only chat completions that have been created + * with the `store` parameter set to `true` can be deleted. + */ + del(completionId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/chat/completions/${completionId}`, options); + } } +export class ChatCompletionsPage extends CursorPage {} + +export class ChatCompletionStoreMessagesPage extends CursorPage {} + /** * Represents a chat completion response returned by model, based on the provided * input. @@ -119,7 +177,7 @@ export namespace ChatCompletion { /** * A chat completion message generated by the model. */ - message: ChatCompletionsAPI.ChatCompletionMessage; + message: CompletionsCompletionsAPI.ChatCompletionMessage; } export namespace Choice { @@ -130,12 +188,12 @@ export namespace ChatCompletion { /** * A list of message content tokens with log probability information. */ - content: Array | null; + content: Array | null; /** * A list of message refusal tokens with log probability information. 
*/ - refusal: Array | null; + refusal: Array | null; } } } @@ -437,12 +495,12 @@ export namespace ChatCompletionChunk { /** * A list of message content tokens with log probability information. */ - content: Array | null; + content: Array | null; /** * A list of message refusal tokens with log probability information. */ - refusal: Array | null; + refusal: Array | null; } } } @@ -537,6 +595,23 @@ export interface ChatCompletionContentPartText { type: 'text'; } +export interface ChatCompletionDeleted { + /** + * The ID of the chat completion that was deleted. + */ + id: string; + + /** + * Whether the chat completion was deleted. + */ + deleted: boolean; + + /** + * The type of object being deleted. + */ + object: 'chat.completion.deleted'; +} + /** * Developer-provided instructions that the model should follow, regardless of * messages sent by the user. With o1 models and newer, `developer` messages @@ -758,6 +833,16 @@ export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high' | null; */ export type ChatCompletionRole = 'developer' | 'system' | 'user' | 'assistant' | 'tool' | 'function'; +/** + * A chat completion message generated by the model. + */ +export interface ChatCompletionStoreMessage extends ChatCompletionMessage { + /** + * The identifier of the chat message. + */ + id: string; +} + /** * Options for streaming response. Only set this when you set `stream: true`. 
*/ @@ -1229,8 +1314,9 @@ export namespace ChatCompletionCreateParams { } export type ChatCompletionCreateParamsNonStreaming = - ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export type ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming; + CompletionsCompletionsAPI.ChatCompletionCreateParamsNonStreaming; + export type ChatCompletionCreateParamsStreaming = + CompletionsCompletionsAPI.ChatCompletionCreateParamsStreaming; } /** @@ -1272,6 +1358,51 @@ export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreat */ export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreaming; +export interface ChatCompletionUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; +} + +/** + * @deprecated Use ChatCompletionUpdateParams instead + */ +export type CompletionUpdateParams = ChatCompletionUpdateParams; + +export interface ChatCompletionListParams extends CursorPageParams { + /** + * A list of metadata keys to filter the chat completions by. Example: + * + * `metadata[key1]=value1&metadata[key2]=value2` + */ + metadata?: Shared.Metadata | null; + + /** + * The model used to generate the chat completions. + */ + model?: string; + + /** + * Sort order for chat completions by timestamp. Use `asc` for ascending order or + * `desc` for descending order. Defaults to `asc`. 
+ */ + order?: 'asc' | 'desc'; +} + +/** + * @deprecated Use ChatCompletionListParams instead + */ +export type CompletionListParams = ChatCompletionListParams; + +Completions.ChatCompletionsPage = ChatCompletionsPage; +Completions.Messages = Messages; + export declare namespace Completions { export { type ChatCompletion as ChatCompletion, @@ -1284,6 +1415,7 @@ export declare namespace Completions { type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, @@ -1295,6 +1427,7 @@ export declare namespace Completions { type ChatCompletionPredictionContent as ChatCompletionPredictionContent, type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, @@ -1303,11 +1436,18 @@ export declare namespace Completions { type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, type 
CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type CompletionUpdateParams as CompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, + type CompletionListParams as CompletionListParams, }; + + export { Messages as Messages, type MessageListParams as MessageListParams }; } diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts new file mode 100644 index 000000000..3691f41d8 --- /dev/null +++ b/src/resources/chat/completions/index.ts @@ -0,0 +1,49 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + ChatCompletionStoreMessagesPage, + ChatCompletionsPage, + Completions, + type ChatCompletion, + type ChatCompletionAssistantMessageParam, + type ChatCompletionAudio, + type ChatCompletionAudioParam, + type ChatCompletionChunk, + type ChatCompletionContentPart, + type ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText, + type ChatCompletionDeleted, + type ChatCompletionDeveloperMessageParam, + type ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam, + type ChatCompletionMessage, + type ChatCompletionMessageParam, + type ChatCompletionMessageToolCall, + type ChatCompletionModality, + type ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent, + type ChatCompletionReasoningEffort, + type ChatCompletionRole, + type ChatCompletionStoreMessage, + type ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob, + type ChatCompletionTool, + type ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam, + type 
ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams, + type CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams, + type CompletionUpdateParams, + type ChatCompletionListParams, + type CompletionListParams, +} from './completions'; +export { Messages, type MessageListParams } from './messages'; diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts new file mode 100644 index 000000000..fc1cc5d94 --- /dev/null +++ b/src/resources/chat/completions/messages.ts @@ -0,0 +1,52 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import * as CompletionsAPI from './completions'; +import { ChatCompletionStoreMessagesPage } from './completions'; +import { type CursorPageParams } from '../../../pagination'; + +export class Messages extends APIResource { + /** + * Get the messages in a stored chat completion. Only chat completions that have + * been created with the `store` parameter set to `true` will be returned. 
+ */ + list( + completionId: string, + query?: MessageListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + completionId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + completionId: string, + query: MessageListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(completionId, {}, query); + } + return this._client.getAPIList( + `/chat/completions/${completionId}/messages`, + ChatCompletionStoreMessagesPage, + { query, ...options }, + ); + } +} + +export interface MessageListParams extends CursorPageParams { + /** + * Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + * for descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; +} + +export declare namespace Messages { + export { type MessageListParams as MessageListParams }; +} + +export { ChatCompletionStoreMessagesPage }; diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index c3be19402..a9b5b46fb 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -2,6 +2,8 @@ export { Chat, type ChatModel } from './chat'; export { + ChatCompletionStoreMessagesPage, + ChatCompletionsPage, Completions, type ChatCompletion, type ChatCompletionAssistantMessageParam, @@ -13,6 +15,7 @@ export { type ChatCompletionContentPartInputAudio, type ChatCompletionContentPartRefusal, type ChatCompletionContentPartText, + type ChatCompletionDeleted, type ChatCompletionDeveloperMessageParam, type ChatCompletionFunctionCallOption, type ChatCompletionFunctionMessageParam, @@ -24,6 +27,7 @@ export { type ChatCompletionPredictionContent, type ChatCompletionReasoningEffort, type ChatCompletionRole, + type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, type ChatCompletionSystemMessageParam, type ChatCompletionTokenLogprob, @@ -38,4 +42,8 @@ export { type CompletionCreateParamsNonStreaming, type 
ChatCompletionCreateParamsStreaming, type CompletionCreateParamsStreaming, -} from './completions'; + type ChatCompletionUpdateParams, + type CompletionUpdateParams, + type ChatCompletionListParams, + type CompletionListParams, +} from './completions/index'; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index be75a46f0..664e39d9d 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -4,7 +4,7 @@ import { APIResource } from '../resource'; import { APIPromise } from '../core'; import * as Core from '../core'; import * as CompletionsAPI from './completions'; -import * as ChatCompletionsAPI from './chat/completions'; +import * as CompletionsCompletionsAPI from './chat/completions/completions'; import { Stream } from '../streaming'; export class Completions extends APIResource { @@ -311,7 +311,7 @@ export interface CompletionCreateParamsBase { /** * Options for streaming response. Only set this when you set `stream: true`. */ - stream_options?: ChatCompletionsAPI.ChatCompletionStreamOptions | null; + stream_options?: CompletionsCompletionsAPI.ChatCompletionStreamOptions | null; /** * The suffix that comes after a completion of inserted text. diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index f7b16166d..86e90376d 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -75,14 +75,14 @@ export namespace Moderation { * execution of wrongdoing, or that gives advice or instruction on how to commit * illicit acts. For example, "how to shoplift" would fit this category. */ - illicit: boolean; + illicit: boolean | null; /** * Content that includes instructions or advice that facilitate the planning or * execution of wrongdoing that also includes violence, or that gives advice or * instruction on the procurement of any weapon. 
*/ - 'illicit/violent': boolean; + 'illicit/violent': boolean | null; /** * Content that promotes, encourages, or depicts acts of self-harm, such as diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts deleted file mode 100644 index 8f1bc7d4c..000000000 --- a/tests/api-resources/chat/completions.test.ts +++ /dev/null @@ -1,65 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import OpenAI from 'openai'; -import { Response } from 'node-fetch'; - -const client = new OpenAI({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', -}); - -describe('resource completions', () => { - test('create: only required params', async () => { - const responsePromise = client.chat.completions.create({ - messages: [{ content: 'string', role: 'developer' }], - model: 'gpt-4o', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('create: required and optional params', async () => { - const response = await client.chat.completions.create({ - messages: [{ content: 'string', role: 'developer', name: 'name' }], - model: 'gpt-4o', - audio: { format: 'wav', voice: 'alloy' }, - frequency_penalty: -2, - function_call: 'none', - functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], - logit_bias: { foo: 0 }, - logprobs: true, - max_completion_tokens: 0, - max_tokens: 0, - metadata: { foo: 'string' }, - modalities: ['text'], - n: 1, - parallel_tool_calls: true, - prediction: { content: 'string', type: 'content' }, - presence_penalty: -2, - reasoning_effort: 'low', - response_format: { 
type: 'text' }, - seed: 0, - service_tier: 'auto', - stop: 'string', - store: true, - stream: false, - stream_options: { include_usage: true }, - temperature: 1, - tool_choice: 'none', - tools: [ - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, - ], - top_logprobs: 0, - top_p: 1, - user: 'user-1234', - }); - }); -}); diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts new file mode 100644 index 000000000..acdd631db --- /dev/null +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -0,0 +1,144 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource completions', () => { + test('create: only required params', async () => { + const responsePromise = client.chat.completions.create({ + messages: [{ content: 'string', role: 'developer' }], + model: 'gpt-4o', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.chat.completions.create({ + messages: [{ content: 'string', role: 'developer', name: 'name' }], + model: 'gpt-4o', + audio: { format: 'wav', voice: 'alloy' }, + frequency_penalty: -2, + function_call: 'none', + functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], + logit_bias: { foo: 0 }, + 
logprobs: true, + max_completion_tokens: 0, + max_tokens: 0, + metadata: { foo: 'string' }, + modalities: ['text'], + n: 1, + parallel_tool_calls: true, + prediction: { content: 'string', type: 'content' }, + presence_penalty: -2, + reasoning_effort: 'low', + response_format: { type: 'text' }, + seed: 0, + service_tier: 'auto', + stop: 'string', + store: true, + stream: false, + stream_options: { include_usage: true }, + temperature: 1, + tool_choice: 'none', + tools: [ + { + function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, + type: 'function', + }, + ], + top_logprobs: 0, + top_p: 1, + user: 'user-1234', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.chat.completions.retrieve('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.retrieve('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.chat.completions.update('completion_id', { metadata: { foo: 'string' } }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + 
expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.chat.completions.update('completion_id', { metadata: { foo: 'string' } }); + }); + + test('list', async () => { + const responsePromise = client.chat.completions.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.chat.completions.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.list( + { after: 'after', limit: 0, metadata: { foo: 'string' }, model: 'model', order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.chat.completions.del('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed 
correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.del('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/chat/completions/messages.test.ts b/tests/api-resources/chat/completions/messages.test.ts new file mode 100644 index 000000000..664106cb9 --- /dev/null +++ b/tests/api-resources/chat/completions/messages.test.ts @@ -0,0 +1,40 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource messages', () => { + test('list', async () => { + const responsePromise = client.chat.completions.messages.list('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.messages.list('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + 
client.chat.completions.messages.list( + 'completion_id', + { after: 'after', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); From f9897464738ddd6c3207be3530b03db7e522e52e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 19:53:06 +0000 Subject: [PATCH 416/533] release: 4.85.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 023314f41..f48cc7f57 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.84.1" + ".": "4.85.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 444430307..290b2414d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.85.0 (2025-02-13) + +Full Changelog: [v4.84.1...v4.85.0](https://github.com/openai/openai-node/compare/v4.84.1...v4.85.0) + +### Features + +* **api:** add support for storing chat completions ([#1327](https://github.com/openai/openai-node/issues/1327)) ([8d77f8e](https://github.com/openai/openai-node/commit/8d77f8e3c4801b7fa1e7c6f50b48c1de1f43f3e6)) + + +### Bug Fixes + +* **realtime:** call .toString() on WebSocket url ([#1324](https://github.com/openai/openai-node/issues/1324)) ([09bc50d](https://github.com/openai/openai-node/commit/09bc50d439679b6acfd2441e69ee5aa18c00e5d9)) + ## 4.84.1 (2025-02-13) Full Changelog: [v4.84.0...v4.84.1](https://github.com/openai/openai-node/compare/v4.84.0...v4.84.1) diff --git a/jsr.json b/jsr.json index 3148d6fca..368f86c0b 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.84.1", + "version": "4.85.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json 
b/package.json index 4686e3a97..dc61af02c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.84.1", + "version": "4.85.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 767424b0e..6483fa72b 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.84.1'; // x-release-please-version +export const VERSION = '4.85.0'; // x-release-please-version From 26d5868dd53045bc820a607100eab1070785f50c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 13 Feb 2025 22:37:36 +0000 Subject: [PATCH 417/533] fix(client): fix export map for index exports (#1328) --- package.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/package.json b/package.json index dc61af02c..46f58814d 100644 --- a/package.json +++ b/package.json @@ -112,17 +112,17 @@ "default": "./dist/index.mjs" }, "./*.mjs": { - "types": "./dist/*.d.ts", - "default": "./dist/*.mjs" + "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], + "default": ["./dist/*.mjs", "./dist/*/index.mjs"] }, "./*.js": { - "types": "./dist/*.d.ts", - "default": "./dist/*.js" + "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], + "default": ["./dist/*.js", "./dist/*/index.js"] }, "./*": { - "types": "./dist/*.d.ts", - "require": "./dist/*.js", - "default": "./dist/*.mjs" + "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], + "require": ["./dist/*.js", "./dist/*/index.js"], + "default": ["./dist/*.mjs", "./dist/*/index.mjs"] } }, "bin": "./bin/cli", From 1f38cc1976f4091a90a38d49e6ddc1c22e5c39ab Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 14 Feb 2025 10:19:22 +0000 Subject: [PATCH 418/533] fix(package): add chat/completions.ts back in (#1333) --- src/resources/chat/completions.ts | 1 + 1 file changed, 1 insertion(+) create mode 100644 
src/resources/chat/completions.ts diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts new file mode 100644 index 000000000..55b151e8b --- /dev/null +++ b/src/resources/chat/completions.ts @@ -0,0 +1 @@ +export * from './completions/completions'; From 13aab101588c2eee1250d7c50b2abfeca1c5fa3d Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 14 Feb 2025 10:30:35 +0000 Subject: [PATCH 419/533] chore(internal): add missing return type annotation (#1334) --- src/pagination.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pagination.ts b/src/pagination.ts index ad90a3a74..7a513fc44 100644 --- a/src/pagination.ts +++ b/src/pagination.ts @@ -77,7 +77,7 @@ export class CursorPage return this.data ?? []; } - override hasNextPage() { + override hasNextPage(): boolean { if (this.has_more === false) { return false; } From b9460fbc7ca9639df91c0b7184eea9c7631ae313 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 14 Feb 2025 10:28:44 +0000 Subject: [PATCH 420/533] CI: add ecosystem tests (#1332) --- .github/workflows/ci.yml | 33 +++++++++++++++++++++++++++++++++ .gitignore | 2 +- ecosystem-tests/cli.ts | 1 + 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6798e38a..85d792c44 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,3 +64,36 @@ jobs: - name: Run tests run: ./scripts/test + + ecosystem_tests: + name: ecosystem tests (v${{ matrix.node-version }}) + runs-on: ubuntu-latest + if: github.repository == 'openai/openai-node' + timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + node-version: ['20'] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '${{ matrix.node-version }}' + + - uses: denoland/setup-deno@v1 + with: + deno-version: v1.39.0 + + - uses: oven-sh/setup-bun@v2 + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run 
ecosystem tests + run: | + yarn tsn ecosystem-tests/cli.ts --live --verbose --parallel --jobs=4 --retry=3 + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} diff --git a/.gitignore b/.gitignore index 81c4c41ca..3fdab1cb7 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,4 @@ tmp .pack ecosystem-tests/deno/package.json ecosystem-tests/*/openai.tgz - +.dev.vars diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index 4803b47c2..77faddec5 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -70,6 +70,7 @@ const projectRunners = { 'cloudflare-worker': async () => { await installPackage(); + await fs.writeFile('.dev.vars', `OPENAI_API_KEY='${process.env['OPENAI_API_KEY']}'`); await run('npm', ['run', 'tsc']); if (state.live) { From 212710db8c8c139392c6532c0eccfd13558ef2d4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Feb 2025 10:31:06 +0000 Subject: [PATCH 421/533] release: 4.85.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 +++++++++++++ jsr.json | 2 +- package.json | 37 +++++++++++++++++++++++++++-------- src/version.ts | 2 +- 5 files changed, 46 insertions(+), 11 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f48cc7f57..89f1ce153 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.0" + ".": "4.85.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 290b2414d..9850ac460 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.85.1 (2025-02-14) + +Full Changelog: [v4.85.0...v4.85.1](https://github.com/openai/openai-node/compare/v4.85.0...v4.85.1) + +### Bug Fixes + +* **client:** fix export map for index exports ([#1328](https://github.com/openai/openai-node/issues/1328)) ([647ba7a](https://github.com/openai/openai-node/commit/647ba7a52311928f604c72b2cc95698c0837887f)) +* **package:** add chat/completions.ts back in 
([#1333](https://github.com/openai/openai-node/issues/1333)) ([e4b5546](https://github.com/openai/openai-node/commit/e4b554632ab1646da831f29413fefb3378c49cc1)) + + +### Chores + +* **internal:** add missing return type annotation ([#1334](https://github.com/openai/openai-node/issues/1334)) ([53e0856](https://github.com/openai/openai-node/commit/53e0856ec4d36deee4d71b5aaf436df0a59b9402)) + ## 4.85.0 (2025-02-13) Full Changelog: [v4.84.1...v4.85.0](https://github.com/openai/openai-node/compare/v4.84.1...v4.85.0) diff --git a/jsr.json b/jsr.json index 368f86c0b..0e1eea3b3 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.0", + "version": "4.85.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 46f58814d..45337f85d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.0", + "version": "4.85.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", @@ -112,17 +112,38 @@ "default": "./dist/index.mjs" }, "./*.mjs": { - "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], - "default": ["./dist/*.mjs", "./dist/*/index.mjs"] + "types": [ + "./dist/*.d.ts", + "./dist/*/index.d.ts" + ], + "default": [ + "./dist/*.mjs", + "./dist/*/index.mjs" + ] }, "./*.js": { - "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], - "default": ["./dist/*.js", "./dist/*/index.js"] + "types": [ + "./dist/*.d.ts", + "./dist/*/index.d.ts" + ], + "default": [ + "./dist/*.js", + "./dist/*/index.js" + ] }, "./*": { - "types": ["./dist/*.d.ts", "./dist/*/index.d.ts"], - "require": ["./dist/*.js", "./dist/*/index.js"], - "default": ["./dist/*.mjs", "./dist/*/index.mjs"] + "types": [ + "./dist/*.d.ts", + "./dist/*/index.d.ts" + ], + "require": [ + "./dist/*.js", + "./dist/*/index.js" + ], + "default": [ + "./dist/*.mjs", + "./dist/*/index.mjs" + ] } }, "bin": "./bin/cli", diff --git 
a/src/version.ts b/src/version.ts index 6483fa72b..52fb45056 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.85.0'; // x-release-please-version +export const VERSION = '4.85.1'; // x-release-please-version From b0b4189420e1c5bb5fc4bbb8925f88fe65f9b217 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Feb 2025 18:03:31 +0000 Subject: [PATCH 422/533] fix: optimize sse chunk reading off-by-one error (#1339) --- src/internal/decoders/line.ts | 31 +++++++ src/streaming.ts | 48 +--------- tests/internal/decoders/line.test.ts | 128 +++++++++++++++++++++++++++ tests/streaming.test.ts | 81 +---------------- 4 files changed, 161 insertions(+), 127 deletions(-) create mode 100644 tests/internal/decoders/line.test.ts diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts index 66f62c057..947f240b3 100644 --- a/src/internal/decoders/line.ts +++ b/src/internal/decoders/line.ts @@ -143,3 +143,34 @@ function findNewlineIndex( return null; } + +export function findDoubleNewlineIndex(buffer: Uint8Array): number { + // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n) + // and returns the index right after the first occurrence of any pattern, + // or -1 if none of the patterns are found. 
+ const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = 0; i < buffer.length - 1; i++) { + if (buffer[i] === newline && buffer[i + 1] === newline) { + // \n\n + return i + 2; + } + if (buffer[i] === carriage && buffer[i + 1] === carriage) { + // \r\r + return i + 2; + } + if ( + buffer[i] === carriage && + buffer[i + 1] === newline && + i + 3 < buffer.length && + buffer[i + 2] === carriage && + buffer[i + 3] === newline + ) { + // \r\n\r\n + return i + 4; + } + } + + return -1; +} diff --git a/src/streaming.ts b/src/streaming.ts index 1d1ae344b..52266154c 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -1,6 +1,6 @@ import { ReadableStream, type Response } from './_shims/index'; import { OpenAIError } from './error'; -import { LineDecoder } from './internal/decoders/line'; +import { findDoubleNewlineIndex, LineDecoder } from './internal/decoders/line'; import { ReadableStreamToAsyncIterable } from './internal/stream-utils'; import { APIError } from './error'; @@ -259,37 +259,6 @@ async function* iterSSEChunks(iterator: AsyncIterableIterator): AsyncGene } } -function findDoubleNewlineIndex(buffer: Uint8Array): number { - // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n) - // and returns the index right after the first occurrence of any pattern, - // or -1 if none of the patterns are found. 
- const newline = 0x0a; // \n - const carriage = 0x0d; // \r - - for (let i = 0; i < buffer.length - 2; i++) { - if (buffer[i] === newline && buffer[i + 1] === newline) { - // \n\n - return i + 2; - } - if (buffer[i] === carriage && buffer[i + 1] === carriage) { - // \r\r - return i + 2; - } - if ( - buffer[i] === carriage && - buffer[i + 1] === newline && - i + 3 < buffer.length && - buffer[i + 2] === carriage && - buffer[i + 3] === newline - ) { - // \r\n\r\n - return i + 4; - } - } - - return -1; -} - class SSEDecoder { private data: string[]; private event: string | null; @@ -345,21 +314,6 @@ class SSEDecoder { } } -/** This is an internal helper function that's just used for testing */ -export function _decodeChunks(chunks: string[], { flush }: { flush: boolean } = { flush: false }): string[] { - const decoder = new LineDecoder(); - const lines: string[] = []; - for (const chunk of chunks) { - lines.push(...decoder.decode(chunk)); - } - - if (flush) { - lines.push(...decoder.flush()); - } - - return lines; -} - function partition(str: string, delimiter: string): [string, string, string] { const index = str.indexOf(delimiter); if (index !== -1) { diff --git a/tests/internal/decoders/line.test.ts b/tests/internal/decoders/line.test.ts new file mode 100644 index 000000000..e76858e55 --- /dev/null +++ b/tests/internal/decoders/line.test.ts @@ -0,0 +1,128 @@ +import { findDoubleNewlineIndex, LineDecoder } from 'openai/internal/decoders/line'; + +function decodeChunks(chunks: string[], { flush }: { flush: boolean } = { flush: false }): string[] { + const decoder = new LineDecoder(); + const lines: string[] = []; + for (const chunk of chunks) { + lines.push(...decoder.decode(chunk)); + } + + if (flush) { + lines.push(...decoder.flush()); + } + + return lines; +} + +describe('line decoder', () => { + test('basic', () => { + // baz is not included because the line hasn't ended yet + expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']); + }); + + test('basic 
with \\r', () => { + expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); + expect(decodeChunks(['foo', ' bar\r\nbaz'], { flush: true })).toEqual(['foo bar', 'baz']); + }); + + test('trailing new lines', () => { + expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('trailing new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('escaped new lines', () => { + expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']); + }); + + test('escaped new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); + }); + + test('\\r & \\n split across multiple chunks', () => { + expect(decodeChunks(['foo\r', '\n', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('single \\r', () => { + expect(decodeChunks(['foo\r', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('double \\r', () => { + expect(decodeChunks(['foo\r', 'bar\r'], { flush: true })).toEqual(['foo', 'bar']); + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + // implementation detail that we don't yield the single \r line until a new \r or \n is encountered + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: false })).toEqual(['foo']); + }); + + test('double \\r then \\r\\n', () => { + expect(decodeChunks(['foo\r', '\r', '\r', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); + }); + + test('double newline', () => { + expect(decodeChunks(['foo\n\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + expect(decodeChunks(['foo', '\n', '\n', 
'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('multi-byte characters across chunks', () => { + const decoder = new LineDecoder(); + + // bytes taken from the string 'известни' and arbitrarily split + // so that some multi-byte characters span multiple chunks + expect(decoder.decode(new Uint8Array([0xd0]))).toHaveLength(0); + expect(decoder.decode(new Uint8Array([0xb8, 0xd0, 0xb7, 0xd0]))).toHaveLength(0); + expect( + decoder.decode(new Uint8Array([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8])), + ).toHaveLength(0); + + const decoded = decoder.decode(new Uint8Array([0xa])); + expect(decoded).toEqual(['известни']); + }); + + test('flushing trailing newlines', () => { + expect(decodeChunks(['foo\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); + }); + + test('flushing empty buffer', () => { + expect(decodeChunks([], { flush: true })).toEqual([]); + }); +}); + +describe('findDoubleNewlineIndex', () => { + test('finds \\n\\n', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\n\nbar'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\n\nbar'))).toBe(2); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\n\n'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\n\n'))).toBe(2); + }); + + test('finds \\r\\r', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\rbar'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\rbar'))).toBe(2); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\r'))).toBe(5); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\r'))).toBe(2); + }); + + test('finds \\r\\n\\r\\n', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r\nbar'))).toBe(7); + expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\n\r\nbar'))).toBe(4); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r\n'))).toBe(7); + 
expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\n\r\n'))).toBe(4); + }); + + test('returns -1 when no double newline found', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\nbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\rbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\nbar'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode(''))).toBe(-1); + }); + + test('handles incomplete patterns', () => { + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r'))).toBe(-1); + expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n'))).toBe(-1); + }); +}); diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts index 8e5d0ca31..b9a38f208 100644 --- a/tests/streaming.test.ts +++ b/tests/streaming.test.ts @@ -1,86 +1,7 @@ import { Response } from 'node-fetch'; import { PassThrough } from 'stream'; import assert from 'assert'; -import { _iterSSEMessages, _decodeChunks as decodeChunks } from 'openai/streaming'; -import { LineDecoder } from 'openai/internal/decoders/line'; - -describe('line decoder', () => { - test('basic', () => { - // baz is not included because the line hasn't ended yet - expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']); - }); - - test('basic with \\r', () => { - expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); - expect(decodeChunks(['foo', ' bar\r\nbaz'], { flush: true })).toEqual(['foo bar', 'baz']); - }); - - test('trailing new lines', () => { - expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']); - }); - - test('trailing new lines with \\r', () => { - expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']); - }); - - test('escaped new lines', () => { - expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']); - }); - - test('escaped new lines with \\r', () => { - expect(decodeChunks(['foo', 
' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); - }); - - test('\\r & \\n split across multiple chunks', () => { - expect(decodeChunks(['foo\r', '\n', 'bar'], { flush: true })).toEqual(['foo', 'bar']); - }); - - test('single \\r', () => { - expect(decodeChunks(['foo\r', 'bar'], { flush: true })).toEqual(['foo', 'bar']); - }); - - test('double \\r', () => { - expect(decodeChunks(['foo\r', 'bar\r'], { flush: true })).toEqual(['foo', 'bar']); - expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); - // implementation detail that we don't yield the single \r line until a new \r or \n is encountered - expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: false })).toEqual(['foo']); - }); - - test('double \\r then \\r\\n', () => { - expect(decodeChunks(['foo\r', '\r', '\r', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); - expect(decodeChunks(['foo\n', '\n', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']); - }); - - test('double newline', () => { - expect(decodeChunks(['foo\n\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); - expect(decodeChunks(['foo', '\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); - expect(decodeChunks(['foo\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); - expect(decodeChunks(['foo', '\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); - }); - - test('multi-byte characters across chunks', () => { - const decoder = new LineDecoder(); - - // bytes taken from the string 'известни' and arbitrarily split - // so that some multi-byte characters span multiple chunks - expect(decoder.decode(new Uint8Array([0xd0]))).toHaveLength(0); - expect(decoder.decode(new Uint8Array([0xb8, 0xd0, 0xb7, 0xd0]))).toHaveLength(0); - expect( - decoder.decode(new Uint8Array([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8])), - ).toHaveLength(0); - - const decoded = decoder.decode(new Uint8Array([0xa])); - expect(decoded).toEqual(['известни']); - }); - - 
test('flushing trailing newlines', () => { - expect(decodeChunks(['foo\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']); - }); - - test('flushing empty buffer', () => { - expect(decodeChunks([], { flush: true })).toEqual([]); - }); -}); +import { _iterSSEMessages } from 'openai/streaming'; describe('streaming decoding', () => { test('basic', async () => { From 2bce86509a45d96d17cfc837ddfd8ddc5995df8e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Feb 2025 18:04:10 +0000 Subject: [PATCH 423/533] release: 4.85.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 89f1ce153..541794534 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.1" + ".": "4.85.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9850ac460..70a447b0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.85.2 (2025-02-18) + +Full Changelog: [v4.85.1...v4.85.2](https://github.com/openai/openai-node/compare/v4.85.1...v4.85.2) + +### Bug Fixes + +* optimize sse chunk reading off-by-one error ([#1339](https://github.com/openai/openai-node/issues/1339)) ([c82795b](https://github.com/openai/openai-node/commit/c82795b189c73d1c0e3bc3a40d0d4a2558b0483a)) + ## 4.85.1 (2025-02-14) Full Changelog: [v4.85.0...v4.85.1](https://github.com/openai/openai-node/compare/v4.85.0...v4.85.1) diff --git a/jsr.json b/jsr.json index 0e1eea3b3..8f83c0ff2 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.1", + "version": "4.85.2", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 45337f85d..661bc2938 100644 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.1", + "version": "4.85.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 52fb45056..4fdc11dc7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.85.1'; // x-release-please-version +export const VERSION = '4.85.2'; // x-release-please-version From 6d056bf95c9be4046decf20ec4c98dfa2bea2723 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 20 Feb 2025 11:09:29 +0000 Subject: [PATCH 424/533] fix(parsing): remove tool_calls default empty array (#1341) --- src/lib/parser.ts | 17 +++++++++++++++-- src/resources/beta/chat/completions.ts | 2 +- tests/lib/ChatCompletionRunFunctions.test.ts | 20 ++++++++++---------- tests/lib/ChatCompletionStream.test.ts | 3 --- tests/lib/parser.test.ts | 6 ------ 5 files changed, 26 insertions(+), 22 deletions(-) diff --git a/src/lib/parser.ts b/src/lib/parser.ts index f2678e312..a750375dc 100644 --- a/src/lib/parser.ts +++ b/src/lib/parser.ts @@ -119,7 +119,15 @@ export function maybeParseChatCompletion< ...completion, choices: completion.choices.map((choice) => ({ ...choice, - message: { ...choice.message, parsed: null, tool_calls: choice.message.tool_calls ?? [] }, + message: { + ...choice.message, + parsed: null, + ...(choice.message.tool_calls ? + { + tool_calls: choice.message.tool_calls, + } + : undefined), + }, })), }; } @@ -144,7 +152,12 @@ export function parseChatCompletion< ...choice, message: { ...choice.message, - tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall(params, toolCall)) ?? [], + ...(choice.message.tool_calls ? + { + tool_calls: + choice.message.tool_calls?.map((toolCall) => parseToolCall(params, toolCall)) ?? undefined, + } + : undefined), parsed: choice.message.content && !choice.message.refusal ? 
parseResponseFormat(params, choice.message.content) diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index c9360a95c..083b9914e 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -50,7 +50,7 @@ export interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall { export interface ParsedChatCompletionMessage extends ChatCompletionMessage { parsed: ParsedT | null; - tool_calls: Array; + tool_calls?: Array; } export interface ParsedChoice extends ChatCompletion.Choice { diff --git a/tests/lib/ChatCompletionRunFunctions.test.ts b/tests/lib/ChatCompletionRunFunctions.test.ts index b684f204d..496501a86 100644 --- a/tests/lib/ChatCompletionRunFunctions.test.ts +++ b/tests/lib/ChatCompletionRunFunctions.test.ts @@ -628,7 +628,7 @@ describe('resource completions', () => { content: "it's raining", parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.functionCallResults).toEqual([`it's raining`]); @@ -876,7 +876,7 @@ describe('resource completions', () => { content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.functionCallResults).toEqual(['3']); @@ -1125,7 +1125,7 @@ describe('resource completions', () => { content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.functionCallResults).toEqual([`must be an object`, '3']); @@ -1443,7 +1443,7 @@ describe('resource completions', () => { content: "it's raining", parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.functionCallResults).toEqual([ @@ -1572,7 +1572,7 @@ describe('resource completions', () => { content: "it's raining", parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); 
expect(listener.eventFunctionCallResults).toEqual([`it's raining`]); @@ -1795,7 +1795,7 @@ describe('resource completions', () => { content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.eventFunctionCallResults).toEqual(['3']); @@ -1997,7 +1997,7 @@ describe('resource completions', () => { content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.eventFunctionCallResults).toEqual([`must be an object`, '3']); @@ -2301,7 +2301,7 @@ describe('resource completions', () => { content: "it's raining", parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }, ]); expect(listener.eventFunctionCallResults).toEqual([ @@ -2347,7 +2347,7 @@ describe('resource completions', () => { content: 'The weather is great today!', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }); await listener.sanityCheck(); }); @@ -2386,7 +2386,7 @@ describe('resource completions', () => { content: 'The weather is great today!', parsed: null, refusal: null, - tool_calls: [], + tool_calls: undefined, }); await listener.sanityCheck(); }); diff --git a/tests/lib/ChatCompletionStream.test.ts b/tests/lib/ChatCompletionStream.test.ts index e5ef20c9e..34c5fd204 100644 --- a/tests/lib/ChatCompletionStream.test.ts +++ b/tests/lib/ChatCompletionStream.test.ts @@ -39,7 +39,6 @@ describe('.stream()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], }, } `); @@ -198,7 +197,6 @@ describe('.stream()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], }, } `); @@ -386,7 +384,6 @@ describe('.stream()', () => { "parsed": null, "refusal": "I'm very sorry, but I can't assist with that request.", "role": "assistant", - "tool_calls": [], }, } `); diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts index b220e92d3..fa8123f5c 100644 
--- a/tests/lib/parser.test.ts +++ b/tests/lib/parser.test.ts @@ -39,7 +39,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], }, } `); @@ -154,7 +153,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); @@ -488,7 +486,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); }); @@ -787,7 +784,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); }); @@ -947,7 +943,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); }); @@ -1061,7 +1056,6 @@ describe('.parse()', () => { }, "refusal": null, "role": "assistant", - "tool_calls": [], } `); }); From d92fd953309951f4d6dcc9858d8782ea1bff4c79 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Feb 2025 11:10:00 +0000 Subject: [PATCH 425/533] release: 4.85.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 541794534..712720117 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.2" + ".": "4.85.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 70a447b0a..36debfad1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.85.3 (2025-02-20) + +Full Changelog: [v4.85.2...v4.85.3](https://github.com/openai/openai-node/compare/v4.85.2...v4.85.3) + +### Bug Fixes + +* **parsing:** remove tool_calls default empty array ([#1341](https://github.com/openai/openai-node/issues/1341)) ([2672160](https://github.com/openai/openai-node/commit/26721608e61949daa9592483e89b79230bb9198a)) + ## 4.85.2 (2025-02-18) Full Changelog: 
[v4.85.1...v4.85.2](https://github.com/openai/openai-node/compare/v4.85.1...v4.85.2) diff --git a/jsr.json b/jsr.json index 8f83c0ff2..3c480dc70 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.2", + "version": "4.85.3", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 661bc2938..5fdd39fdc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.2", + "version": "4.85.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 4fdc11dc7..679cac2c7 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.85.2'; // x-release-please-version +export const VERSION = '4.85.3'; // x-release-please-version From 9485f5d4d6718bff7f579223c9aa528898451533 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 21 Feb 2025 15:08:12 +0000 Subject: [PATCH 426/533] chore(internal): fix devcontainers setup (#1343) --- .devcontainer/Dockerfile | 23 ----------------------- .devcontainer/devcontainer.json | 27 ++++++++++++--------------- 2 files changed, 12 insertions(+), 38 deletions(-) delete mode 100644 .devcontainer/Dockerfile diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index 8ea34be96..000000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM debian:bookworm-slim AS stainless - -RUN apt-get update && apt-get install -y \ - nodejs \ - npm \ - yarnpkg \ - && apt-get clean autoclean - -# Ensure UTF-8 encoding -ENV LANG=C.UTF-8 -ENV LC_ALL=C.UTF-8 - -# Yarn -RUN ln -sf /usr/bin/yarnpkg /usr/bin/yarn - -WORKDIR /workspace - -COPY package.json yarn.lock /workspace/ - -RUN yarn install - -COPY . 
/workspace diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d55fc4d67..763462fad 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,20 +1,17 @@ // For format details, see https://aka.ms/devcontainer.json. For config options, see the // README at: https://github.com/devcontainers/templates/tree/main/src/debian { - "name": "Debian", - "build": { - "dockerfile": "Dockerfile" + "name": "Development", + "image": "mcr.microsoft.com/devcontainers/typescript-node:latest", + "features": { + "ghcr.io/devcontainers/features/node:1": {} + }, + "postCreateCommand": "yarn install", + "customizations": { + "vscode": { + "extensions": [ + "esbenp.prettier-vscode" + ] + } } - - // Features to add to the dev container. More info: https://containers.dev/features. - // "features": {}, - - // Use 'forwardPorts' to make a list of ports inside the container available locally. - // "forwardPorts": [], - - // Configure tool-specific properties. - // "customizations": {}, - - // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
- // "remoteUser": "root" } From a1a125349ba9c9c2bb602c8c8f368e086c41ac1e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 22 Feb 2025 05:06:15 +0000 Subject: [PATCH 427/533] release: 4.85.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 712720117..6fc92ed1e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.3" + ".": "4.85.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 36debfad1..e2f920af7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.85.4 (2025-02-22) + +Full Changelog: [v4.85.3...v4.85.4](https://github.com/openai/openai-node/compare/v4.85.3...v4.85.4) + +### Chores + +* **internal:** fix devcontainers setup ([#1343](https://github.com/openai/openai-node/issues/1343)) ([cb1ec90](https://github.com/openai/openai-node/commit/cb1ec907832e325bc29abe94ae325e0477cb87d1)) + ## 4.85.3 (2025-02-20) Full Changelog: [v4.85.2...v4.85.3](https://github.com/openai/openai-node/compare/v4.85.2...v4.85.3) diff --git a/jsr.json b/jsr.json index 3c480dc70..7ced58a9c 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.3", + "version": "4.85.4", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 5fdd39fdc..38572079f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.3", + "version": "4.85.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 679cac2c7..ebfb680f1 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const 
VERSION = '4.85.3'; // x-release-please-version +export const VERSION = '4.85.4'; // x-release-please-version From bb269a1a6fda11c533fb88fa1250a342a5a11ed0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 20:05:21 +0000 Subject: [PATCH 428/533] feat(api): add gpt-4.5-preview (#1349) --- .stats.yml | 2 +- src/resources/beta/assistants.ts | 2 ++ src/resources/beta/realtime/realtime.ts | 24 +++++++++++++++++------- src/resources/beta/realtime/sessions.ts | 24 ++++++++++++++++++++++-- src/resources/chat/chat.ts | 2 ++ src/resources/files.ts | 5 +++++ src/resources/uploads/uploads.ts | 2 +- 7 files changed, 50 insertions(+), 11 deletions(-) diff --git a/.stats.yml b/.stats.yml index 658877d3b..163146e38 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-4aa6ee65ba9efc789e05e6a5ef0883b2cadf06def8efd863dbf75e9e233067e1.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5d30684c3118d049682ea30cdb4dbef39b97d51667da484689193dc40162af32.yml diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 0cc63d691..919bf53b3 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1310,6 +1310,8 @@ export interface AssistantUpdateParams { | 'gpt-4o-2024-05-13' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index e46dcdaaf..5e2b1c833 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -1796,11 +1796,14 @@ export interface SessionCreatedEvent { /** * Send this event to update the session’s default configuration. 
The client may - * send this event at any time to update the session configuration, and any field - * may be updated at any time, except for "voice". The server will respond with a - * `session.updated` event that shows the full effective configuration. Only fields - * that are present are updated, thus the correct way to clear a field like - * "instructions" is to pass an empty string. + * send this event at any time to update any field, except for `voice`. However, + * note that once a session has been initialized with a particular `model`, it + * can’t be changed to another model using `session.update`. + * + * When the server receives a `session.update`, it will respond with a + * `session.updated` event showing the full, effective configuration. Only the + * fields that are present are updated. To clear a field like `instructions`, pass + * an empty string. */ export interface SessionUpdateEvent { /** @@ -1982,11 +1985,18 @@ export namespace SessionUpdateEvent { */ export interface TurnDetection { /** - * Whether or not to automatically generate a response when VAD is enabled. `true` - * by default. + * Whether or not to automatically generate a response when a VAD stop event + * occurs. `true` by default. */ create_response?: boolean; + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. `true` by default. + */ + interrupt_response?: boolean; + /** * Amount of audio to include before the VAD detected speech (in milliseconds). * Defaults to 300ms. diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index d2afa25b1..a99c9e045 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -168,6 +168,19 @@ export namespace Session { * volume and respond at the end of user speech. 
*/ export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. `true` by default. + */ + create_response?: boolean; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. `true` by default. + */ + interrupt_response?: boolean; + /** * Amount of audio to include before the VAD detected speech (in milliseconds). * Defaults to 300ms. @@ -532,11 +545,18 @@ export namespace SessionCreateParams { */ export interface TurnDetection { /** - * Whether or not to automatically generate a response when VAD is enabled. `true` - * by default. + * Whether or not to automatically generate a response when a VAD stop event + * occurs. `true` by default. */ create_response?: boolean; + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. `true` by default. + */ + interrupt_response?: boolean; + /** * Amount of audio to include before the VAD detected speech (in milliseconds). * Defaults to 300ms. diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 5bceec45a..627b4fc23 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -61,6 +61,8 @@ export type ChatModel = | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' | 'gpt-4o' | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' diff --git a/src/resources/files.ts b/src/resources/files.ts index 67bc95469..f5f23dcad 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -173,6 +173,11 @@ export interface FileObject { */ status: 'uploaded' | 'processed' | 'error'; + /** + * The Unix timestamp (in seconds) for when the file will expire. + */ + expires_at?: number; + /** * @deprecated Deprecated. 
For details on why a fine-tuning training file failed * validation, see the `error` field on `fine_tuning.job`. diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index bfe752cd7..f977e18f6 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -86,7 +86,7 @@ export interface Upload { created_at: number; /** - * The Unix timestamp (in seconds) for when the Upload was created. + * The Unix timestamp (in seconds) for when the Upload will expire. */ expires_at: number; From f93c5bc81d2b58dd821b9c11a789d2750951837d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 20:05:52 +0000 Subject: [PATCH 429/533] release: 4.86.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6fc92ed1e..28ebbc3ab 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.85.4" + ".": "4.86.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e2f920af7..48445f98a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.86.0 (2025-02-27) + +Full Changelog: [v4.85.4...v4.86.0](https://github.com/openai/openai-node/compare/v4.85.4...v4.86.0) + +### Features + +* **api:** add gpt-4.5-preview ([#1349](https://github.com/openai/openai-node/issues/1349)) ([2a1d36b](https://github.com/openai/openai-node/commit/2a1d36b560323fca058f98607775642370e90a47)) + ## 4.85.4 (2025-02-22) Full Changelog: [v4.85.3...v4.85.4](https://github.com/openai/openai-node/compare/v4.85.3...v4.85.4) diff --git a/jsr.json b/jsr.json index 7ced58a9c..28a13dd6b 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.85.4", + "version": "4.86.0", "exports": { ".": "./index.ts", 
"./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 38572079f..be7052b15 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.85.4", + "version": "4.86.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index ebfb680f1..d342ca5d3 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.85.4'; // x-release-please-version +export const VERSION = '4.86.0'; // x-release-please-version From 634a209a6025640e2849133f6997af8faa28d4d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 22:00:53 +0000 Subject: [PATCH 430/533] docs: update URLs from stainlessapi.com to stainless.com (#1352) More details at https://www.stainless.com/changelog/stainless-com --- SECURITY.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index c54acaf33..3b3bd8a66 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,9 +2,9 @@ ## Reporting Security Issues -This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. +This SDK is generated by [Stainless Software Inc](http://stainless.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. -To report a security issue, please contact the Stainless team at security@stainlessapi.com. +To report a security issue, please contact the Stainless team at security@stainless.com. 
## Responsible Disclosure From 0d3045ea19e34712fb395000545fcce3f9201149 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2025 22:01:25 +0000 Subject: [PATCH 431/533] release: 4.86.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 28ebbc3ab..92b3782ff 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.86.0" + ".": "4.86.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 48445f98a..9dd57c5ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.86.1 (2025-02-27) + +Full Changelog: [v4.86.0...v4.86.1](https://github.com/openai/openai-node/compare/v4.86.0...v4.86.1) + +### Documentation + +* update URLs from stainlessapi.com to stainless.com ([#1352](https://github.com/openai/openai-node/issues/1352)) ([8294e9e](https://github.com/openai/openai-node/commit/8294e9ef57ed98722105b56d205ebea9d028f671)) + ## 4.86.0 (2025-02-27) Full Changelog: [v4.85.4...v4.86.0](https://github.com/openai/openai-node/compare/v4.85.4...v4.86.0) diff --git a/jsr.json b/jsr.json index 28a13dd6b..c3addf639 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.86.0", + "version": "4.86.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index be7052b15..236815732 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.86.0", + "version": "4.86.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index d342ca5d3..759b28a99 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 
@@ -export const VERSION = '4.86.0'; // x-release-please-version +export const VERSION = '4.86.1'; // x-release-please-version From 1044c487566569e773d5f6c1a94ce6b614e62b80 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 4 Mar 2025 21:17:33 +0000 Subject: [PATCH 432/533] chore(internal): run example files in CI (#1357) --- .github/workflows/ci.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 85d792c44..fe24c0dcb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,6 +65,26 @@ jobs: - name: Run tests run: ./scripts/test + examples: + name: examples + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '18' + - name: Install dependencies + run: | + yarn install + + - env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + yarn tsn examples/demo.ts + ecosystem_tests: name: ecosystem tests (v${{ matrix.node-version }}) runs-on: ubuntu-latest From 6e00ac242554d5f2b86852a082cab2538c605bc9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Mar 2025 05:07:14 +0000 Subject: [PATCH 433/533] release: 4.86.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 92b3782ff..a889d24b4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.86.1" + ".": "4.86.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9dd57c5ae..38d54fdc1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.86.2 (2025-03-05) + +Full Changelog: 
[v4.86.1...v4.86.2](https://github.com/openai/openai-node/compare/v4.86.1...v4.86.2) + +### Chores + +* **internal:** run example files in CI ([#1357](https://github.com/openai/openai-node/issues/1357)) ([88d0050](https://github.com/openai/openai-node/commit/88d0050336749deb3810b4cb43473de1f84e42bd)) + ## 4.86.1 (2025-02-27) Full Changelog: [v4.86.0...v4.86.1](https://github.com/openai/openai-node/compare/v4.86.0...v4.86.1) diff --git a/jsr.json b/jsr.json index c3addf639..1c0948aaa 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.86.1", + "version": "4.86.2", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 236815732..78afb8946 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.86.1", + "version": "4.86.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 759b28a99..c43a3c320 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.86.1'; // x-release-please-version +export const VERSION = '4.86.2'; // x-release-please-version From 06122424a4d783aff07b7089b64986fb35bc24e4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Mar 2025 11:29:02 -0400 Subject: [PATCH 434/533] feat(api): add /v1/responses and built-in tools [platform.openai.com/docs/changelog](http://platform.openai.com/docs/changelog) --- .stats.yml | 4 +- README.md | 151 +- api.md | 224 +- examples/responses/stream.ts | 24 + examples/responses/streaming-tools.ts | 52 + .../responses/structured-outputs-tools.ts | 60 + examples/responses/structured-outputs.ts | 32 + examples/yarn.lock | 0 scripts/bootstrap | 2 +- src/core.ts | 4 +- src/helpers/zod.ts | 46 + src/index.ts | 58 +- src/lib/ResponsesParser.ts | 262 ++ src/lib/parser.ts | 28 + src/lib/responses/EventTypes.ts | 76 + 
src/lib/responses/ResponseStream.ts | 298 ++ src/resources/beta/assistants.ts | 55 +- src/resources/beta/beta.ts | 37 - src/resources/beta/index.ts | 16 - src/resources/beta/threads/runs/runs.ts | 7 +- src/resources/beta/threads/threads.ts | 90 +- src/resources/chat/chat.ts | 46 +- src/resources/chat/completions/completions.ts | 290 +- src/resources/chat/completions/index.ts | 1 - src/resources/chat/completions/messages.ts | 2 +- src/resources/chat/index.ts | 3 +- src/resources/files.ts | 26 +- src/resources/index.ts | 20 + src/resources/responses/index.ts | 9 + src/resources/responses/input-items.ts | 276 ++ src/resources/responses/responses.ts | 2761 +++++++++++++++++ src/resources/shared.ts | 156 +- src/resources/uploads/uploads.ts | 7 +- .../{beta => }/vector-stores/file-batches.ts | 23 +- .../{beta => }/vector-stores/files.ts | 89 +- .../{beta => }/vector-stores/index.ts | 6 + .../{beta => }/vector-stores/vector-stores.ts | 130 +- src/streaming.ts | 2 +- .../chat/completions/completions.test.ts | 11 +- .../responses/input-items.test.ts | 40 + .../responses.test.ts} | 68 +- .../vector-stores/file-batches.test.ts | 21 +- .../api-resources/vector-stores/files.test.ts | 132 + .../vector-stores/vector-stores.test.ts | 39 +- 44 files changed, 5226 insertions(+), 458 deletions(-) create mode 100755 examples/responses/stream.ts create mode 100755 examples/responses/streaming-tools.ts create mode 100755 examples/responses/structured-outputs-tools.ts create mode 100755 examples/responses/structured-outputs.ts create mode 100644 examples/yarn.lock create mode 100644 src/lib/ResponsesParser.ts create mode 100644 src/lib/responses/EventTypes.ts create mode 100644 src/lib/responses/ResponseStream.ts create mode 100644 src/resources/responses/index.ts create mode 100644 src/resources/responses/input-items.ts create mode 100644 src/resources/responses/responses.ts rename src/resources/{beta => }/vector-stores/file-batches.ts (92%) rename src/resources/{beta => 
}/vector-stores/files.ts (74%) rename src/resources/{beta => }/vector-stores/index.ts (82%) rename src/resources/{beta => }/vector-stores/vector-stores.ts (77%) create mode 100644 tests/api-resources/responses/input-items.test.ts rename tests/api-resources/{beta/vector-stores/files.test.ts => responses/responses.test.ts} (58%) rename tests/api-resources/{beta => }/vector-stores/file-batches.test.ts (81%) create mode 100644 tests/api-resources/vector-stores/files.test.ts rename tests/api-resources/{beta => }/vector-stores/vector-stores.test.ts (71%) diff --git a/.stats.yml b/.stats.yml index 163146e38..455874212 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 74 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5d30684c3118d049682ea30cdb4dbef39b97d51667da484689193dc40162af32.yml +configured_endpoints: 81 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml diff --git a/README.md b/README.md index 166e35e22..8515c81ed 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,3 @@ -> [!IMPORTANT] -> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. -> -> Please try it out and let us know if you run into any issues! 
-> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 - # OpenAI TypeScript and JavaScript API Library [![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) @@ -27,9 +21,7 @@ deno add jsr:@openai/openai npx jsr add @openai/openai ``` -These commands will make the module importable from the `@openai/openai` scope: - -You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: +These commands will make the module importable from the `@openai/openai` scope. You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: ```ts import OpenAI from 'jsr:@openai/openai'; @@ -37,9 +29,10 @@ import OpenAI from 'jsr:@openai/openai'; ## Usage -The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API. +The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). + +The primary API for interacting with OpenAI models is the [Responses API](https://platform.openai.com/docs/api-reference/responses). You can generate text from the model with the code below. 
- ```ts import OpenAI from 'openai'; @@ -47,100 +40,55 @@ const client = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted }); -async function main() { - const chatCompletion = await client.chat.completions.create({ - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }); -} +const response = await client.responses.create({ + model: 'gpt-4o', + instructions: 'You are a coding assistant that talks like a pirate', + input: 'Are semicolons optional in JavaScript?', +}); -main(); +console.log(response.output_text); ``` -## Streaming responses - -We provide support for streaming responses using Server Sent Events (SSE). +The previous standard (supported indefinitely) for generating text is the [Chat Completions API](https://platform.openai.com/docs/api-reference/chat). You can use that API to generate text from the model with the code below. ```ts import OpenAI from 'openai'; -const client = new OpenAI(); +const client = new OpenAI({ + apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +}); -async function main() { - const stream = await client.chat.completions.create({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } -} +const completion = await client.chat.completions.create({ + model: 'gpt-4o', + messages: [ + { role: 'developer', content: 'Talk like a pirate.' }, + { role: 'user', content: 'Are semicolons optional in JavaScript?' }, + ], +}); -main(); +console.log(completion.choices[0].message.content); ``` -If you need to cancel a stream, you can `break` from the loop or call `stream.controller.abort()`. 
- -### Chat Completion streaming helpers +## Streaming responses -This library also provides several conveniences for streaming chat completions, for example: +We provide support for streaming responses using Server Sent Events (SSE). ```ts import OpenAI from 'openai'; -const openai = new OpenAI(); - -async function main() { - const stream = await openai.beta.chat.completions.stream({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - - stream.on('content', (delta, snapshot) => { - process.stdout.write(delta); - }); - - // or, equivalently: - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } - - const chatCompletion = await stream.finalChatCompletion(); - console.log(chatCompletion); // {id: "…", choices: […], …} -} - -main(); -``` - -See [helpers.md](helpers.md#chat-events) for more details. - -### Request & Response types - -This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: - - -```ts -import OpenAI from 'openai'; +const client = new OpenAI(); -const client = new OpenAI({ - apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +const stream = await client.responses.create({ + model: 'gpt-4o', + input: 'Say "Sheep sleep deep" ten times fast!', + stream: true, }); -async function main() { - const params: OpenAI.Chat.ChatCompletionCreateParams = { - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }; - const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); +for await (const event of stream) { + console.log(event); } - -main(); ``` -Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. 
- ## File uploads Request parameters that correspond to file uploads can be passed in many different forms: @@ -265,17 +213,17 @@ Note that requests which time out will be [retried twice by default](#retries). All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. ```ts -const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); -console.log(completion._request_id) // req_123 +const response = await client.responses.create({ model: 'gpt-4o', input: 'testing 123' }); +console.log(response._request_id) // req_123 ``` You can also access the Request ID using the `.withResponse()` method: ```ts -const { data: stream, request_id } = await openai.chat.completions +const { data: stream, request_id } = await openai.responses .create({ - model: 'gpt-4', - messages: [{ role: 'user', content: 'Say this is a test' }], + model: 'gpt-4o', + input: 'Say this is a test', stream: true, }) .withResponse(); @@ -355,12 +303,6 @@ console.log(result.choices[0]!.message?.content); For more information on support for the Azure API, see [azure.md](azure.md). -## Automated function calls - -We provide the `openai.beta.chat.completions.runTools({…})` convenience helper for using function tool calls with the `/chat/completions` endpoint which automatically call the JavaScript functions you provide and sends their results back to the `/chat/completions` endpoint, looping as long as the model requests tool calls. - -For more information see [helpers.md](helpers.md#automated-function-calls). 
- ## Advanced Usage ### Accessing raw Response data (e.g., headers) @@ -373,17 +315,19 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi ```ts const client = new OpenAI(); -const response = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) +const httpResponse = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .asResponse(); -console.log(response.headers.get('X-My-Header')); -console.log(response.statusText); // access the underlying Response object -const { data: chatCompletion, response: raw } = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) +// access the underlying web standard Response object +console.log(httpResponse.headers.get('X-My-Header')); +console.log(httpResponse.statusText); + +const { data: modelResponse, response: raw } = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); -console.log(chatCompletion); +console.log(modelResponse); ``` ### Making custom/undocumented requests @@ -432,6 +376,11 @@ validate or strip extra properties from the response from the API. ### Customizing the fetch client +> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. +> +> Please try it out and let us know if you run into any issues! +> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 + By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments. 
If you would prefer to use a global, web-standards-compliant `fetch` function even in a Node environment, diff --git a/api.md b/api.md index 63f239628..b21ac2d5f 100644 --- a/api.md +++ b/api.md @@ -2,10 +2,15 @@ Types: +- ChatModel +- ComparisonFilter +- CompoundFilter - ErrorObject - FunctionDefinition - FunctionParameters - Metadata +- Reasoning +- ReasoningEffort - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText @@ -52,7 +57,6 @@ Types: - ChatCompletionModality - ChatCompletionNamedToolChoice - ChatCompletionPredictionContent -- ChatCompletionReasoningEffort - ChatCompletionRole - ChatCompletionStoreMessage - ChatCompletionStreamOptions @@ -63,6 +67,7 @@ Types: - ChatCompletionToolMessageParam - ChatCompletionUserMessageParam - CreateChatCompletionRequestMessage +- ChatCompletionReasoningEffort Methods: @@ -224,6 +229,67 @@ Methods: - client.fineTuning.jobs.checkpoints.list(fineTuningJobId, { ...params }) -> FineTuningJobCheckpointsPage +# VectorStores + +Types: + +- AutoFileChunkingStrategyParam +- FileChunkingStrategy +- FileChunkingStrategyParam +- OtherFileChunkingStrategyObject +- StaticFileChunkingStrategy +- StaticFileChunkingStrategyObject +- StaticFileChunkingStrategyObjectParam +- VectorStore +- VectorStoreDeleted +- VectorStoreSearchResponse + +Methods: + +- client.vectorStores.create({ ...params }) -> VectorStore +- client.vectorStores.retrieve(vectorStoreId) -> VectorStore +- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore +- client.vectorStores.list({ ...params }) -> VectorStoresPage +- client.vectorStores.del(vectorStoreId) -> VectorStoreDeleted +- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponsesPage + +## Files + +Types: + +- VectorStoreFile +- VectorStoreFileDeleted +- FileContentResponse + +Methods: + +- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> 
VectorStoreFile +- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage +- client.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted +- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponsesPage +- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile> + +## FileBatches + +Types: + +- VectorStoreFileBatch + +Methods: + +- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage +- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) 
-> Promise<VectorStoreFileBatch> + # Beta ## Realtime @@ -287,72 +353,6 @@ Methods: - client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse -## VectorStores - -Types: - -- AutoFileChunkingStrategyParam -- FileChunkingStrategy -- FileChunkingStrategyParam -- OtherFileChunkingStrategyObject -- StaticFileChunkingStrategy -- StaticFileChunkingStrategyObject -- StaticFileChunkingStrategyObjectParam -- VectorStore -- VectorStoreDeleted - -Methods: - -- client.beta.vectorStores.create({ ...params }) -> VectorStore -- client.beta.vectorStores.retrieve(vectorStoreId) -> VectorStore -- client.beta.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore -- client.beta.vectorStores.list({ ...params }) -> VectorStoresPage -- client.beta.vectorStores.del(vectorStoreId) -> VectorStoreDeleted - -### Files - -Types: - -- VectorStoreFile -- VectorStoreFileDeleted - -Methods: - -- client.beta.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile -- client.beta.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile -- client.beta.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted -- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) 
-> Promise<VectorStoreFile> - -### FileBatches - -Types: - -- VectorStoreFileBatch - -Methods: - -- client.beta.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) -> Promise<VectorStoreFileBatch> - -## Chat - -### Completions - -Methods: - -- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.stream(body, options?) 
-> ChatCompletionStream - ## Assistants Types: @@ -526,3 +526,93 @@ Types: Methods: - client.uploads.parts.create(uploadId, { ...params }) -> UploadPart + +# Responses + +Types: + +- ComputerTool +- EasyInputMessage +- FileSearchTool +- FunctionTool +- Response +- ResponseAudioDeltaEvent +- ResponseAudioDoneEvent +- ResponseAudioTranscriptDeltaEvent +- ResponseAudioTranscriptDoneEvent +- ResponseCodeInterpreterCallCodeDeltaEvent +- ResponseCodeInterpreterCallCodeDoneEvent +- ResponseCodeInterpreterCallCompletedEvent +- ResponseCodeInterpreterCallInProgressEvent +- ResponseCodeInterpreterCallInterpretingEvent +- ResponseCodeInterpreterToolCall +- ResponseCompletedEvent +- ResponseComputerToolCall +- ResponseContent +- ResponseContentPartAddedEvent +- ResponseContentPartDoneEvent +- ResponseCreatedEvent +- ResponseError +- ResponseErrorEvent +- ResponseFailedEvent +- ResponseFileSearchCallCompletedEvent +- ResponseFileSearchCallInProgressEvent +- ResponseFileSearchCallSearchingEvent +- ResponseFileSearchToolCall +- ResponseFormatTextConfig +- ResponseFormatTextJSONSchemaConfig +- ResponseFunctionCallArgumentsDeltaEvent +- ResponseFunctionCallArgumentsDoneEvent +- ResponseFunctionToolCall +- ResponseFunctionWebSearch +- ResponseInProgressEvent +- ResponseIncludable +- ResponseIncompleteEvent +- ResponseInput +- ResponseInputAudio +- ResponseInputContent +- ResponseInputFile +- ResponseInputImage +- ResponseInputItem +- ResponseInputMessageContentList +- ResponseInputText +- ResponseOutputAudio +- ResponseOutputItem +- ResponseOutputItemAddedEvent +- ResponseOutputItemDoneEvent +- ResponseOutputMessage +- ResponseOutputRefusal +- ResponseOutputText +- ResponseRefusalDeltaEvent +- ResponseRefusalDoneEvent +- ResponseStatus +- ResponseStreamEvent +- ResponseTextAnnotationDeltaEvent +- ResponseTextConfig +- ResponseTextDeltaEvent +- ResponseTextDoneEvent +- ResponseUsage +- ResponseWebSearchCallCompletedEvent +- ResponseWebSearchCallInProgressEvent +- 
ResponseWebSearchCallSearchingEvent +- Tool +- ToolChoiceFunction +- ToolChoiceOptions +- ToolChoiceTypes +- WebSearchTool + +Methods: + +- client.responses.create({ ...params }) -> Response +- client.responses.retrieve(responseId, { ...params }) -> Response +- client.responses.del(responseId) -> void + +## InputItems + +Types: + +- ResponseItemList + +Methods: + +- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemListDataPage diff --git a/examples/responses/stream.ts b/examples/responses/stream.ts new file mode 100755 index 000000000..ea3d0849e --- /dev/null +++ b/examples/responses/stream.ts @@ -0,0 +1,24 @@ +#!/usr/bin/env -S npm run tsn -T + +import OpenAI from 'openai'; + +const openai = new OpenAI(); + +async function main() { + const runner = openai.responses + .stream({ + model: 'gpt-4o-2024-08-06', + input: 'solve 8x + 31 = 2', + }) + .on('event', (event) => console.log(event)) + .on('response.output_text.delta', (diff) => process.stdout.write(diff.delta)); + + for await (const event of runner) { + console.log('event', event); + } + + const result = await runner.finalResponse(); + console.log(result); +} + +main(); diff --git a/examples/responses/streaming-tools.ts b/examples/responses/streaming-tools.ts new file mode 100755 index 000000000..87a48d0c3 --- /dev/null +++ b/examples/responses/streaming-tools.ts @@ -0,0 +1,52 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodResponsesFunction } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 'customers', 'products']); +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); +const OrderBy = z.enum(['asc', 'desc']); +const DynamicValue = z.object({ + column_name: Column, +}); + +const Condition = z.object({ + column: Column, + operator: Operator, + value: 
z.union([z.string(), z.number(), DynamicValue]), +}); + +const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, +}); + +async function main() { + const client = new OpenAI(); + + const tool = zodResponsesFunction({ name: 'query', parameters: Query }); + + const stream = client.responses.stream({ + model: 'gpt-4o-2024-08-06', + input: 'look up all my orders in november of last year that were fulfilled but not delivered on time', + tools: [tool], + }); + + for await (const event of stream) { + console.dir(event, { depth: 10 }); + } +} + +main(); diff --git a/examples/responses/structured-outputs-tools.ts b/examples/responses/structured-outputs-tools.ts new file mode 100755 index 000000000..29eaabf93 --- /dev/null +++ b/examples/responses/structured-outputs-tools.ts @@ -0,0 +1,60 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodResponsesFunction } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 'customers', 'products']); +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); +const OrderBy = z.enum(['asc', 'desc']); +const DynamicValue = z.object({ + column_name: Column, +}); + +const Condition = z.object({ + column: Column, + operator: Operator, + value: z.union([z.string(), z.number(), DynamicValue]), +}); + +const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, +}); + +async function main() { + const client = new OpenAI(); + + const tool = zodResponsesFunction({ name: 'query', parameters: Query }); + + const rsp = await client.responses.parse({ + model: 'gpt-4o-2024-08-06', + input: 'look up all my orders in november of last year that were fulfilled but not delivered on time', + tools: [tool], + 
}); + + console.log(rsp); + + const functionCall = rsp.output[0]!; + + if (functionCall.type !== 'function_call') { + throw new Error('Expected function call'); + } + + const query = functionCall.parsed_arguments; + + console.log(query); +} + +main(); diff --git a/examples/responses/structured-outputs.ts b/examples/responses/structured-outputs.ts new file mode 100755 index 000000000..07ff93a60 --- /dev/null +++ b/examples/responses/structured-outputs.ts @@ -0,0 +1,32 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodTextFormat } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Step = z.object({ + explanation: z.string(), + output: z.string(), +}); + +const MathResponse = z.object({ + steps: z.array(Step), + final_answer: z.string(), +}); + +const client = new OpenAI(); + +async function main() { + const rsp = await client.responses.parse({ + input: 'solve 8x + 31 = 2', + model: 'gpt-4o-2024-08-06', + text: { + format: zodTextFormat(MathResponse, 'math_response'), + }, + }); + + console.log(rsp.output_parsed); + console.log('answer: ', rsp.output_parsed?.final_answer); +} + +main().catch(console.error); diff --git a/examples/yarn.lock b/examples/yarn.lock new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/bootstrap b/scripts/bootstrap index 033156d3a..f107c3a24 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,7 +4,7 @@ set -e cd "$(dirname "$0")/.." 
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ]; then brew bundle check >/dev/null 2>&1 || { echo "==> Installing Homebrew dependencies…" brew bundle diff --git a/src/core.ts b/src/core.ts index 6578c0781..a41eaa3fa 100644 --- a/src/core.ts +++ b/src/core.ts @@ -62,8 +62,8 @@ async function defaultParseResponse(props: APIResponseProps): Promise { return _zodToJsonSchema(schema, { @@ -74,6 +78,23 @@ export function zodResponseFormat( ); } +export function zodTextFormat( + zodObject: ZodInput, + name: string, + props?: Omit, +): AutoParseableTextFormat> { + return makeParseableTextFormat( + { + type: 'json_schema', + ...props, + name, + strict: true, + schema: zodToJsonSchema(zodObject, { name }), + }, + (content) => zodObject.parse(JSON.parse(content)), + ); +} + /** * Creates a chat completion `function` tool that can be invoked * automatically by the chat completion `.runTools()` method or automatically @@ -106,3 +127,28 @@ export function zodFunction(options: { }, ); } + +export function zodResponsesFunction(options: { + name: string; + parameters: Parameters; + function?: ((args: zodInfer) => unknown | Promise) | undefined; + description?: string | undefined; +}): AutoParseableResponseTool<{ + arguments: Parameters; + name: string; + function: (args: zodInfer) => unknown; +}> { + return makeParseableResponseTool( + { + type: 'function', + name: options.name, + parameters: zodToJsonSchema(options.parameters, { name: options.name }), + strict: true, + ...(options.description ? 
{ description: options.description } : undefined), + }, + { + callback: options.function, + parser: (args) => options.parameters.parse(JSON.parse(args)), + }, + ); +} diff --git a/src/index.ts b/src/index.ts index debefce8c..c3abed2db 100644 --- a/src/index.ts +++ b/src/index.ts @@ -65,14 +65,34 @@ import { } from './resources/moderations'; import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; -import { Chat, ChatModel } from './resources/chat/chat'; +import { Chat } from './resources/chat/chat'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { Responses } from './resources/responses/responses'; import { Upload, UploadCompleteParams, UploadCreateParams, Uploads as UploadsAPIUploads, } from './resources/uploads/uploads'; +import { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyObjectParam, + VectorStore, + VectorStoreCreateParams, + VectorStoreDeleted, + VectorStoreListParams, + VectorStoreSearchParams, + VectorStoreSearchResponse, + VectorStoreSearchResponsesPage, + VectorStoreUpdateParams, + VectorStores, + VectorStoresPage, +} from './resources/vector-stores/vector-stores'; import { ChatCompletion, ChatCompletionAssistantMessageParam, @@ -98,7 +118,6 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, - ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStoreMessage, ChatCompletionStreamOptions, @@ -267,9 +286,11 @@ export class OpenAI extends Core.APIClient { moderations: API.Moderations = new API.Moderations(this); models: API.Models = new API.Models(this); fineTuning: API.FineTuning = new API.FineTuning(this); + vectorStores: API.VectorStores = new API.VectorStores(this); beta: API.Beta = new API.Beta(this); batches: API.Batches = new 
API.Batches(this); uploads: API.Uploads = new API.Uploads(this); + responses: API.Responses = new API.Responses(this); protected override defaultQuery(): Core.DefaultQuery | undefined { return this._options.defaultQuery; @@ -325,10 +346,14 @@ OpenAI.Moderations = Moderations; OpenAI.Models = Models; OpenAI.ModelsPage = ModelsPage; OpenAI.FineTuning = FineTuning; +OpenAI.VectorStores = VectorStores; +OpenAI.VectorStoresPage = VectorStoresPage; +OpenAI.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; OpenAI.Beta = Beta; OpenAI.Batches = Batches; OpenAI.BatchesPage = BatchesPage; OpenAI.Uploads = UploadsAPIUploads; +OpenAI.Responses = Responses; export declare namespace OpenAI { export type RequestOptions = Core.RequestOptions; @@ -350,7 +375,6 @@ export declare namespace OpenAI { export { Chat as Chat, - type ChatModel as ChatModel, type ChatCompletion as ChatCompletion, type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, type ChatCompletionAudio as ChatCompletionAudio, @@ -371,7 +395,6 @@ export declare namespace OpenAI { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -440,6 +463,26 @@ export declare namespace OpenAI { export { FineTuning as FineTuning }; + export { + VectorStores as VectorStores, + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type 
StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreSearchResponse as VectorStoreSearchResponse, + VectorStoresPage as VectorStoresPage, + VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + type VectorStoreSearchParams as VectorStoreSearchParams, + }; + export { Beta as Beta }; export { @@ -459,10 +502,17 @@ export declare namespace OpenAI { type UploadCompleteParams as UploadCompleteParams, }; + export { Responses as Responses }; + + export type ChatModel = API.ChatModel; + export type ComparisonFilter = API.ComparisonFilter; + export type CompoundFilter = API.CompoundFilter; export type ErrorObject = API.ErrorObject; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; export type Metadata = API.Metadata; + export type Reasoning = API.Reasoning; + export type ReasoningEffort = API.ReasoningEffort; export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; export type ResponseFormatText = API.ResponseFormatText; diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts new file mode 100644 index 000000000..780b779ff --- /dev/null +++ b/src/lib/ResponsesParser.ts @@ -0,0 +1,262 @@ +import { OpenAIError } from '../error'; +import { type ChatCompletionTool } from '../resources'; +import { + type FunctionTool, + type ParsedContent, + type ParsedResponse, + type ParsedResponseFunctionToolCall, + type ParsedResponseOutputItem, + type Response, + type ResponseCreateParamsBase, + type ResponseCreateParamsNonStreaming, + type 
ResponseFunctionToolCall, + type Tool, +} from '../resources/responses/responses'; +import { type AutoParseableTextFormat, isAutoParsableResponseFormat } from '../lib/parser'; + +type ParseableToolsParams = Array | ChatCompletionTool | null; + +export type ResponseCreateParamsWithTools = ResponseCreateParamsBase & { + tools?: ParseableToolsParams; +}; + +export type ExtractParsedContentFromParams = + NonNullable['format'] extends AutoParseableTextFormat ? P : null; + +export function maybeParseResponse< + Params extends ResponseCreateParamsBase | null, + ParsedT = Params extends null ? null : ExtractParsedContentFromParams>, +>(response: Response, params: Params): ParsedResponse { + if (!params || !hasAutoParseableInput(params)) { + return { + ...response, + output_parsed: null, + output: response.output.map((item) => { + if (item.type === 'function_call') { + return { + ...item, + parsed_arguments: null, + }; + } + + if (item.type === 'message') { + return { + ...item, + content: item.content.map((content) => ({ + ...content, + parsed: null, + })), + }; + } else { + return item; + } + }), + }; + } + + return parseResponse(response, params); +} + +export function parseResponse< + Params extends ResponseCreateParamsBase, + ParsedT = ExtractParsedContentFromParams, +>(response: Response, params: Params): ParsedResponse { + const output: Array> = response.output.map( + (item): ParsedResponseOutputItem => { + if (item.type === 'function_call') { + return { + ...item, + parsed_arguments: parseToolCall(params, item), + }; + } + if (item.type === 'message') { + const content: Array> = item.content.map((content) => { + if (content.type === 'output_text') { + return { + ...content, + parsed: parseTextFormat(params, content.text), + }; + } + + return content; + }); + + return { + ...item, + content, + }; + } + + return item; + }, + ); + + const parsed: Omit, 'output_parsed'> = Object.assign({}, response, { output }); + if (!Object.getOwnPropertyDescriptor(response, 
'output_text')) { + addOutputText(parsed); + } + + Object.defineProperty(parsed, 'output_parsed', { + enumerable: true, + get() { + for (const output of parsed.output) { + if (output.type !== 'message') { + continue; + } + + for (const content of output.content) { + if (content.type === 'output_text' && content.parsed !== null) { + return content.parsed; + } + } + } + + return null; + }, + }); + + return parsed as ParsedResponse; +} + +function parseTextFormat< + Params extends ResponseCreateParamsBase, + ParsedT = ExtractParsedContentFromParams, +>(params: Params, content: string): ParsedT | null { + if (params.text?.format?.type !== 'json_schema') { + return null; + } + + if ('$parseRaw' in params.text?.format) { + const text_format = params.text?.format as unknown as AutoParseableTextFormat; + return text_format.$parseRaw(content); + } + + return JSON.parse(content); +} + +export function hasAutoParseableInput(params: ResponseCreateParamsWithTools): boolean { + if (isAutoParsableResponseFormat(params.text?.format)) { + return true; + } + + return false; +} + +type ToolOptions = { + name: string; + arguments: any; + function?: ((args: any) => any) | undefined; +}; + +export type AutoParseableResponseTool< + OptionsT extends ToolOptions, + HasFunction = OptionsT['function'] extends Function ? 
true : false, +> = FunctionTool & { + __arguments: OptionsT['arguments']; // type-level only + __name: OptionsT['name']; // type-level only + + $brand: 'auto-parseable-tool'; + $callback: ((args: OptionsT['arguments']) => any) | undefined; + $parseRaw(args: string): OptionsT['arguments']; +}; + +export function makeParseableResponseTool( + tool: FunctionTool, + { + parser, + callback, + }: { + parser: (content: string) => OptionsT['arguments']; + callback: ((args: any) => any) | undefined; + }, +): AutoParseableResponseTool { + const obj = { ...tool }; + + Object.defineProperties(obj, { + $brand: { + value: 'auto-parseable-tool', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + $callback: { + value: callback, + enumerable: false, + }, + }); + + return obj as AutoParseableResponseTool; +} + +export function isAutoParsableTool(tool: any): tool is AutoParseableResponseTool { + return tool?.['$brand'] === 'auto-parseable-tool'; +} + +function getInputToolByName(input_tools: Array, name: string): FunctionTool | undefined { + return input_tools.find((tool) => tool.type === 'function' && tool.name === name) as + | FunctionTool + | undefined; +} + +function parseToolCall( + params: Params, + toolCall: ResponseFunctionToolCall, +): ParsedResponseFunctionToolCall { + const inputTool = getInputToolByName(params.tools ?? [], toolCall.name); + + return { + ...toolCall, + ...toolCall, + parsed_arguments: + isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCall.arguments) + : inputTool?.strict ? JSON.parse(toolCall.arguments) + : null, + }; +} + +export function shouldParseToolCall( + params: ResponseCreateParamsNonStreaming | null | undefined, + toolCall: ResponseFunctionToolCall, +): boolean { + if (!params) { + return false; + } + + const inputTool = getInputToolByName(params.tools ?? 
[], toolCall.name); + return isAutoParsableTool(inputTool) || inputTool?.strict || false; +} + +export function validateInputTools(tools: ChatCompletionTool[] | undefined) { + for (const tool of tools ?? []) { + if (tool.type !== 'function') { + throw new OpenAIError( + `Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``, + ); + } + + if (tool.function.strict !== true) { + throw new OpenAIError( + `The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`, + ); + } + } +} + +export function addOutputText(rsp: Response): void { + const texts: string[] = []; + for (const output of rsp.output) { + if (output.type !== 'message') { + continue; + } + + for (const content of output.content) { + if (content.type === 'output_text') { + texts.push(content.text); + } + } + } + + rsp.output_text = texts.join(''); +} diff --git a/src/lib/parser.ts b/src/lib/parser.ts index a750375dc..d75d32a40 100644 --- a/src/lib/parser.ts +++ b/src/lib/parser.ts @@ -14,6 +14,7 @@ import { } from '../resources/beta/chat/completions'; import { ResponseFormatJSONSchema } from '../resources/shared'; import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error'; +import { type ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses'; type AnyChatCompletionCreateParams = | ChatCompletionCreateParams @@ -51,6 +52,33 @@ export function makeParseableResponseFormat( return obj as AutoParseableResponseFormat; } +export type AutoParseableTextFormat = ResponseFormatTextJSONSchemaConfig & { + __output: ParsedT; // type-level only + + $brand: 'auto-parseable-response-format'; + $parseRaw(content: string): ParsedT; +}; + +export function makeParseableTextFormat( + response_format: ResponseFormatTextJSONSchemaConfig, + parser: (content: string) => ParsedT, +): AutoParseableTextFormat { + const obj = { ...response_format }; + + Object.defineProperties(obj, { + 
$brand: { + value: 'auto-parseable-response-format', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + }); + + return obj as AutoParseableTextFormat; +} + export function isAutoParsableResponseFormat( response_format: any, ): response_format is AutoParseableResponseFormat { diff --git a/src/lib/responses/EventTypes.ts b/src/lib/responses/EventTypes.ts new file mode 100644 index 000000000..fc1620988 --- /dev/null +++ b/src/lib/responses/EventTypes.ts @@ -0,0 +1,76 @@ +import { + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCodeInterpreterCallCodeDeltaEvent, + ResponseCodeInterpreterCallCodeDoneEvent, + ResponseCodeInterpreterCallCompletedEvent, + ResponseCodeInterpreterCallInProgressEvent, + ResponseCodeInterpreterCallInterpretingEvent, + ResponseCompletedEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreatedEvent, + ResponseErrorEvent, + ResponseFailedEvent, + ResponseFileSearchCallCompletedEvent, + ResponseFileSearchCallInProgressEvent, + ResponseFileSearchCallSearchingEvent, + ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseInProgressEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseRefusalDeltaEvent, + ResponseRefusalDoneEvent, + ResponseTextAnnotationDeltaEvent, + ResponseTextDeltaEvent as RawResponseTextDeltaEvent, + ResponseTextDoneEvent, + ResponseIncompleteEvent, + ResponseWebSearchCallCompletedEvent, + ResponseWebSearchCallInProgressEvent, + ResponseWebSearchCallSearchingEvent, +} from '../../resources/responses/responses'; + +export type ResponseFunctionCallArgumentsDeltaEvent = RawResponseFunctionCallArgumentsDeltaEvent & { + snapshot: string; +}; + +export type ResponseTextDeltaEvent = RawResponseTextDeltaEvent & { + snapshot: string; +}; + +export type 
ParsedResponseStreamEvent = + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; diff --git a/src/lib/responses/ResponseStream.ts b/src/lib/responses/ResponseStream.ts new file mode 100644 index 000000000..0d6cd47dd --- /dev/null +++ b/src/lib/responses/ResponseStream.ts @@ -0,0 +1,298 @@ +import { + type ParsedResponse, + type Response, + type ResponseCreateParamsBase, + type ResponseCreateParamsStreaming, + type ResponseStreamEvent, +} from 'openai/resources/responses/responses'; +import * as Core from '../../core'; +import { APIUserAbortError, OpenAIError } from '../../error'; +import OpenAI from '../../index'; +import { type BaseEvents, EventStream } from '../EventStream'; +import { type ResponseFunctionCallArgumentsDeltaEvent, type ResponseTextDeltaEvent } from './EventTypes'; +import { maybeParseResponse } from '../ResponsesParser'; + +export type ResponseStreamParams = Omit & { + stream?: true; +}; + 
+type ResponseEvents = BaseEvents & + Omit< + { + [K in ResponseStreamEvent['type']]: (event: Extract) => void; + }, + 'response.output_text.delta' | 'response.function_call_arguments.delta' + > & { + event: (event: ResponseStreamEvent) => void; + 'response.output_text.delta': (event: ResponseTextDeltaEvent) => void; + 'response.function_call_arguments.delta': (event: ResponseFunctionCallArgumentsDeltaEvent) => void; + }; + +export type ResponseStreamingParams = Omit & { + stream?: true; +}; + +export class ResponseStream + extends EventStream + implements AsyncIterable +{ + #params: ResponseStreamingParams | null; + #currentResponseSnapshot: Response | undefined; + #finalResponse: ParsedResponse | undefined; + + constructor(params: ResponseStreamingParams | null) { + super(); + this.#params = params; + } + + static createResponse( + client: OpenAI, + params: ResponseStreamParams, + options?: Core.RequestOptions, + ): ResponseStream { + const runner = new ResponseStream(params as ResponseCreateParamsStreaming); + runner._run(() => + runner._createResponse(client, params, { + ...options, + headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, + }), + ); + return runner; + } + + #beginRequest() { + if (this.ended) return; + this.#currentResponseSnapshot = undefined; + } + + #addEvent(this: ResponseStream, event: ResponseStreamEvent) { + if (this.ended) return; + + const response = this.#accumulateResponse(event); + this._emit('event', event); + + switch (event.type) { + case 'response.output_text.delta': { + const output = response.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + const content = output.content[event.content_index]; + if (!content) { + throw new OpenAIError(`missing content at index ${event.content_index}`); + } + if (content.type !== 'output_text') { + throw new OpenAIError(`expected content to be 'output_text', got 
${content.type}`); + } + + this._emit('response.output_text.delta', { + ...event, + snapshot: content.text, + }); + } + break; + } + case 'response.function_call_arguments.delta': { + const output = response.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'function_call') { + this._emit('response.function_call_arguments.delta', { + ...event, + snapshot: output.arguments, + }); + } + break; + } + default: + // @ts-ignore + this._emit(event.type, event); + break; + } + } + + #endRequest(): ParsedResponse { + if (this.ended) { + throw new OpenAIError(`stream has ended, this shouldn't happen`); + } + const snapshot = this.#currentResponseSnapshot; + if (!snapshot) { + throw new OpenAIError(`request ended without sending any events`); + } + this.#currentResponseSnapshot = undefined; + const parsedResponse = finalizeResponse(snapshot, this.#params); + this.#finalResponse = parsedResponse; + + return parsedResponse; + } + + protected async _createResponse( + client: OpenAI, + params: ResponseStreamingParams, + options?: Core.RequestOptions, + ): Promise> { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + this.#beginRequest(); + + const stream = await client.responses.create( + { ...params, stream: true }, + { ...options, signal: this.controller.signal }, + ); + this._connected(); + for await (const event of stream) { + this.#addEvent(event); + } + if (stream.controller.signal?.aborted) { + throw new APIUserAbortError(); + } + return this.#endRequest(); + } + + #accumulateResponse(event: ResponseStreamEvent): Response { + let snapshot = this.#currentResponseSnapshot; + if (!snapshot) { + if (event.type !== 'response.created') { + throw new OpenAIError( + `When snapshot hasn't been set yet, expected 'response.created' event, got ${event.type}`, + ); + } + 
snapshot = this.#currentResponseSnapshot = event.response; + return snapshot; + } + + switch (event.type) { + case 'response.output_item.added': { + snapshot.output.push(event.item); + break; + } + case 'response.content_part.added': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + output.content.push(event.part); + } + break; + } + case 'response.output_text.delta': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + const content = output.content[event.content_index]; + if (!content) { + throw new OpenAIError(`missing content at index ${event.content_index}`); + } + if (content.type !== 'output_text') { + throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`); + } + content.text += event.delta; + } + break; + } + case 'response.function_call_arguments.delta': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'function_call') { + output.arguments += event.delta; + } + break; + } + case 'response.completed': { + this.#currentResponseSnapshot = event.response; + break; + } + } + + return snapshot; + } + + [Symbol.asyncIterator](this: ResponseStream): AsyncIterator { + const pushQueue: ResponseStreamEvent[] = []; + const readQueue: { + resolve: (event: ResponseStreamEvent | undefined) => void; + reject: (err: unknown) => void; + }[] = []; + let done = false; + + this.on('event', (event) => { + const reader = readQueue.shift(); + if (reader) { + reader.resolve(event); + } else { + pushQueue.push(event); + } + }); + + this.on('end', () => { + done = true; + for (const reader of readQueue) { + reader.resolve(undefined); + } + readQueue.length = 0; + 
}); + + this.on('abort', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + this.on('error', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + return { + next: async (): Promise> => { + if (!pushQueue.length) { + if (done) { + return { value: undefined, done: true }; + } + return new Promise((resolve, reject) => + readQueue.push({ resolve, reject }), + ).then((event) => (event ? { value: event, done: false } : { value: undefined, done: true })); + } + const event = pushQueue.shift()!; + return { value: event, done: false }; + }, + return: async () => { + this.abort(); + return { value: undefined, done: true }; + }, + }; + } + + /** + * @returns a promise that resolves with the final Response, or rejects + * if an error occurred or the stream ended prematurely without producing a REsponse. + */ + async finalResponse(): Promise> { + await this.done(); + const response = this.#finalResponse; + if (!response) throw new OpenAIError('stream ended without producing a ChatCompletion'); + return response; + } +} + +function finalizeResponse( + snapshot: Response, + params: ResponseStreamingParams | null, +): ParsedResponse { + return maybeParseResponse(snapshot, params); +} diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 919bf53b3..0668dcf54 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -4,10 +4,8 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; -import * as ChatAPI from '../chat/chat'; import * as MessagesAPI from './threads/messages'; import * as ThreadsAPI from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; import * as RunsAPI from './threads/runs/runs'; import * as StepsAPI from 
'./threads/runs/steps'; import { CursorPage, type CursorPageParams } from '../../pagination'; @@ -1105,7 +1103,7 @@ export interface AssistantCreateParams { * [Model overview](https://platform.openai.com/docs/models) for descriptions of * them. */ - model: (string & {}) | ChatAPI.ChatModel; + model: (string & {}) | Shared.ChatModel; /** * The description of the assistant. The maximum length is 512 characters. @@ -1134,14 +1132,14 @@ export interface AssistantCreateParams { name?: string | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Specifies the format that the model must output. Compatible with @@ -1244,9 +1242,9 @@ export namespace AssistantCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -1265,6 +1263,45 @@ export namespace AssistantCreateParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. 
+ */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -1337,14 +1374,14 @@ export interface AssistantUpdateParams { name?: string | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Specifies the format that the model must output. 
Compatible with diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index df929b2f7..0b909de18 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -40,36 +40,16 @@ import { ThreadUpdateParams, Threads, } from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; -import { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyObjectParam, - VectorStore, - VectorStoreCreateParams, - VectorStoreDeleted, - VectorStoreListParams, - VectorStoreUpdateParams, - VectorStores, - VectorStoresPage, -} from './vector-stores/vector-stores'; import { Chat } from './chat/chat'; export class Beta extends APIResource { realtime: RealtimeAPI.Realtime = new RealtimeAPI.Realtime(this._client); - vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client); threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } Beta.Realtime = Realtime; -Beta.VectorStores = VectorStores; -Beta.VectorStoresPage = VectorStoresPage; Beta.Assistants = Assistants; Beta.AssistantsPage = AssistantsPage; Beta.Threads = Threads; @@ -77,23 +57,6 @@ Beta.Threads = Threads; export declare namespace Beta { export { Realtime as Realtime }; - export { - VectorStores as VectorStores, - type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, - type FileChunkingStrategy as FileChunkingStrategy, - type FileChunkingStrategyParam as FileChunkingStrategyParam, - type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, - type StaticFileChunkingStrategy as StaticFileChunkingStrategy, - type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, - type 
StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, - type VectorStore as VectorStore, - type VectorStoreDeleted as VectorStoreDeleted, - VectorStoresPage as VectorStoresPage, - type VectorStoreCreateParams as VectorStoreCreateParams, - type VectorStoreUpdateParams as VectorStoreUpdateParams, - type VectorStoreListParams as VectorStoreListParams, - }; - export { Chat }; export { diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index babca0016..b9cef17cb 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -37,19 +37,3 @@ export { type ThreadCreateAndRunPollParams, type ThreadCreateAndRunStreamParams, } from './threads/index'; -export { - VectorStoresPage, - VectorStores, - type AutoFileChunkingStrategyParam, - type FileChunkingStrategy, - type FileChunkingStrategyParam, - type OtherFileChunkingStrategyObject, - type StaticFileChunkingStrategy, - type StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyObjectParam, - type VectorStore, - type VectorStoreDeleted, - type VectorStoreCreateParams, - type VectorStoreUpdateParams, - type VectorStoreListParams, -} from './vector-stores/index'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 8ab94cc99..15bfb4204 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -10,7 +10,6 @@ import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStrea import * as RunsAPI from './runs'; import * as Shared from '../../../shared'; import * as AssistantsAPI from '../../assistants'; -import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; import * as ThreadsAPI from '../threads'; import * as StepsAPI from './steps'; @@ -722,7 +721,7 @@ export interface RunCreateParamsBase { * associated with the assistant. If not, the model associated with the assistant * will be used. 
*/ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Body param: Whether to enable @@ -732,14 +731,14 @@ export interface RunCreateParamsBase { parallel_tool_calls?: boolean; /** - * Body param: **o1 and o3-mini models only** + * Body param: **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Body param: Specifies the format that the model must output. Compatible with diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 3f69c6e60..8075ba0ac 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -8,7 +8,6 @@ import * as Core from '../../../core'; import * as ThreadsAPI from './threads'; import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; -import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; import { Annotation, @@ -45,7 +44,6 @@ import { TextDelta, TextDeltaBlock, } from './messages'; -import * as VectorStoresAPI from '../vector-stores/vector-stores'; import * as RunsAPI from './runs/runs'; import { RequiredActionFunctionToolCall, @@ -441,9 +439,9 @@ export namespace ThreadCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. 
*/ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -462,6 +460,45 @@ export namespace ThreadCreateParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -573,7 +610,7 @@ export interface ThreadCreateAndRunParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Whether to enable @@ -800,9 +837,9 @@ export namespace ThreadCreateAndRunParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. 
*/ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -821,6 +858,45 @@ export namespace ThreadCreateAndRunParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 627b4fc23..9dbc636d8 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,6 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; +import * as Shared from '../shared'; import * as CompletionsAPI from './completions/completions'; import { ChatCompletion, @@ -52,48 +53,7 @@ export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); } -export type ChatModel = - | 'o3-mini' - | 'o3-mini-2025-01-31' - | 'o1' - | 'o1-2024-12-17' - | 'o1-preview' - | 'o1-preview-2024-09-12' - | 'o1-mini' - | 'o1-mini-2024-09-12' - | 'gpt-4.5-preview' - | 'gpt-4.5-preview-2025-02-27' - | 'gpt-4o' - | 'gpt-4o-2024-11-20' - | 'gpt-4o-2024-08-06' - | 'gpt-4o-2024-05-13' - | 'gpt-4o-audio-preview' - | 'gpt-4o-audio-preview-2024-10-01' - | 'gpt-4o-audio-preview-2024-12-17' - | 'gpt-4o-mini-audio-preview' - | 'gpt-4o-mini-audio-preview-2024-12-17' - | 'chatgpt-4o-latest' - | 'gpt-4o-mini' - | 'gpt-4o-mini-2024-07-18' - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0301' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613'; +export type ChatModel = Shared.ChatModel; Chat.Completions = Completions; Chat.ChatCompletionsPage = ChatCompletionsPage; @@ -123,7 +83,6 @@ export declare namespace Chat { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -134,6 +93,7 @@ export declare namespace Chat { type 
ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 3af4a3a1d..7b1c353e2 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -7,7 +7,6 @@ import * as Core from '../../../core'; import * as CompletionsCompletionsAPI from './completions'; import * as CompletionsAPI from '../../completions'; import * as Shared from '../../shared'; -import * as ChatAPI from '../chat'; import * as MessagesAPI from './messages'; import { MessageListParams, Messages } from './messages'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -17,6 +16,13 @@ export class Completions extends APIResource { messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client); /** + * **Starting a new project?** We recommend trying + * [Responses](https://platform.openai.com/docs/api-reference/responses) to take + * advantage of the latest OpenAI platform features. Compare + * [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + * + * --- + * * Creates a model response for the given chat conversation. Learn more in the * [text generation](https://platform.openai.com/docs/guides/text-generation), * [vision](https://platform.openai.com/docs/guides/vision), and @@ -50,7 +56,7 @@ export class Completions extends APIResource { } /** - * Get a stored chat completion. 
Only chat completions that have been created with + * Get a stored chat completion. Only Chat Completions that have been created with * the `store` parameter set to `true` will be returned. */ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { @@ -58,7 +64,7 @@ export class Completions extends APIResource { } /** - * Modify a stored chat completion. Only chat completions that have been created + * Modify a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be modified. Currently, the only * supported modification is to update the `metadata` field. */ @@ -71,7 +77,7 @@ export class Completions extends APIResource { } /** - * List stored chat completions. Only chat completions that have been stored with + * List stored Chat Completions. Only Chat Completions that have been stored with * the `store` parameter set to `true` will be returned. */ list( @@ -90,7 +96,7 @@ export class Completions extends APIResource { } /** - * Delete a stored chat completion. Only chat completions that have been created + * Delete a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be deleted. */ del(completionId: string, options?: Core.RequestOptions): Core.APIPromise { @@ -316,16 +322,16 @@ export interface ChatCompletionAudioParam { format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** - * The voice the model uses to respond. Supported voices are `ash`, `ballad`, - * `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, - * `echo`, and `shimmer`; these voices are less expressive). + * The voice the model uses to respond. Supported voices are `alloy`, `ash`, + * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. 
*/ voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } /** - * Represents a streamed chunk of a chat completion response returned by model, + * Represents a streamed chunk of a chat completion response returned by the model, * based on the provided input. + * [Learn more](https://platform.openai.com/docs/guides/streaming-responses). */ export interface ChatCompletionChunk { /** @@ -512,7 +518,43 @@ export namespace ChatCompletionChunk { export type ChatCompletionContentPart = | ChatCompletionContentPartText | ChatCompletionContentPartImage - | ChatCompletionContentPartInputAudio; + | ChatCompletionContentPartInputAudio + | ChatCompletionContentPart.File; + +export namespace ChatCompletionContentPart { + /** + * Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + * generation. + */ + export interface File { + file: File.File; + + /** + * The type of the content part. Always `file`. + */ + type: 'file'; + } + + export namespace File { + export interface File { + /** + * The base64 encoded file data, used when passing the file to the model as a + * string. + */ + file_data?: string; + + /** + * The ID of an uploaded file to use as input. + */ + file_id?: string; + + /** + * The name of the file, used when passing the file to the model as a string. + */ + file_name?: string; + } + } +} /** * Learn about [image inputs](https://platform.openai.com/docs/guides/vision). @@ -685,6 +727,12 @@ export interface ChatCompletionMessage { */ role: 'assistant'; + /** + * Annotations for the message, when applicable, as when using the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + annotations?: Array; + /** * If the audio output modality is requested, this object contains data about the * audio response from the model. 
@@ -705,6 +753,48 @@ export interface ChatCompletionMessage { } export namespace ChatCompletionMessage { + /** + * A URL citation when using web search. + */ + export interface Annotation { + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * A URL citation when using web search. + */ + url_citation: Annotation.URLCitation; + } + + export namespace Annotation { + /** + * A URL citation when using web search. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The URL of the web resource. + */ + url: string; + } + } + /** * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a * function that should be called, as generated by the model. @@ -818,16 +908,6 @@ export interface ChatCompletionPredictionContent { type: 'content'; } -/** - * **o1 and o3-mini models only** - * - * Constrains effort on reasoning for - * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - * result in faster responses and fewer tokens used on reasoning in a response. 
- */ -export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high' | null; - /** * The role of the author of a message */ @@ -998,6 +1078,8 @@ export interface ChatCompletionUserMessageParam { */ export type CreateChatCompletionRequestMessage = ChatCompletionMessageParam; +export type ChatCompletionReasoningEffort = Shared.ReasoningEffort | null; + export type ChatCompletionCreateParams = | ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming; @@ -1014,11 +1096,13 @@ export interface ChatCompletionCreateParamsBase { messages: Array; /** - * ID of the model to use. See the - * [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - * table for details on which models work with the Chat API. + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. */ - model: (string & {}) | ChatAPI.ChatModel; + model: (string & {}) | Shared.ChatModel; /** * Parameters for audio output. Required when audio output is requested with @@ -1107,8 +1191,8 @@ export interface ChatCompletionCreateParamsBase { metadata?: Shared.Metadata | null; /** - * Output types that you would like the model to generate for this request. Most - * models are capable of generating text, which is the default: + * Output types that you would like the model to generate. Most models are capable + * of generating text, which is the default: * * `["text"]` * @@ -1118,7 +1202,7 @@ export interface ChatCompletionCreateParamsBase { * * `["text", "audio"]` */ - modalities?: Array | null; + modalities?: Array<'text' | 'audio'> | null; /** * How many chat completion choices to generate for each input message. 
Note that @@ -1148,14 +1232,14 @@ export interface ChatCompletionCreateParamsBase { presence_penalty?: number | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: ChatCompletionReasoningEffort | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * An object specifying the format that the model must output. @@ -1165,21 +1249,14 @@ export interface ChatCompletionCreateParamsBase { * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - * message the model generates is valid JSON. - * - * **Important:** when using JSON mode, you **must** also instruct the model to - * produce JSON yourself via a system or user message. Without this, the model may - * generate an unending stream of whitespace until the generation reaches the token - * limit, resulting in a long-running and seemingly "stuck" request. Also note that - * the message content may be partially cut off if `finish_reason="length"`, which - * indicates the generation exceeded `max_tokens` or the conversation exceeded the - * max context length. + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. */ response_format?: | Shared.ResponseFormatText - | Shared.ResponseFormatJSONObject - | Shared.ResponseFormatJSONSchema; + | Shared.ResponseFormatJSONSchema + | Shared.ResponseFormatJSONObject; /** * This feature is in Beta. 
If specified, our system will make a best effort to @@ -1198,15 +1275,19 @@ export interface ChatCompletionCreateParamsBase { * utilize scale tier credits until they are exhausted. * - If set to 'auto', and the Project is not Scale tier enabled, the request will * be processed using the default service tier with a lower uptime SLA and no - * latency guarantee. + * latency guarantee. * - If set to 'default', the request will be processed using the default service - * tier with a lower uptime SLA and no latency guarantee. + * tier with a lower uptime SLA and no latency guarantee. + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ service_tier?: 'auto' | 'default' | null; /** - * Up to 4 sequences where the API will stop generating further tokens. + * Up to 4 sequences where the API will stop generating further tokens. The + * returned text will not contain the stop sequence. */ stop?: string | null | Array; /** @@ -1218,12 +1299,14 @@ export interface ChatCompletionCreateParamsBase { store?: boolean | null; /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+ * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream?: boolean | null; @@ -1282,6 +1365,13 @@ export interface ChatCompletionCreateParamsBase { * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; + + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + web_search_options?: ChatCompletionCreateParams.WebSearchOptions; } export namespace ChatCompletionCreateParams { @@ -1313,6 +1403,70 @@ export namespace ChatCompletionCreateParams { parameters?: Shared.FunctionParameters; } + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + export interface WebSearchOptions { + /** + * High level guidance for the amount of context window space to use for the + * search. One of `low`, `medium`, or `high`. `medium` is the default. + */ + search_context_size?: 'low' | 'medium' | 'high'; + + /** + * Approximate location parameters for the search. + */ + user_location?: WebSearchOptions.UserLocation | null; + } + + export namespace WebSearchOptions { + /** + * Approximate location parameters for the search. + */ + export interface UserLocation { + /** + * Approximate location parameters for the search. + */ + approximate: UserLocation.Approximate; + + /** + * The type of location approximation. Always `approximate`. + */ + type: 'approximate'; + } + + export namespace UserLocation { + /** + * Approximate location parameters for the search. 
+ */ + export interface Approximate { + /** + * Free text input for the city of the user, e.g. `San Francisco`. + */ + city?: string; + + /** + * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + * the user, e.g. `US`. + */ + country?: string; + + /** + * Free text input for the region of the user, e.g. `California`. + */ + region?: string; + + /** + * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + * user, e.g. `America/Los_Angeles`. + */ + timezone?: string; + } + } + } + export type ChatCompletionCreateParamsNonStreaming = CompletionsCompletionsAPI.ChatCompletionCreateParamsNonStreaming; export type ChatCompletionCreateParamsStreaming = @@ -1326,12 +1480,14 @@ export type CompletionCreateParams = ChatCompletionCreateParams; export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase { /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. 
*/ stream?: false | null; } @@ -1343,12 +1499,14 @@ export type CompletionCreateParamsNonStreaming = ChatCompletionCreateParamsNonSt export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase { /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream: true; } @@ -1377,19 +1535,19 @@ export type CompletionUpdateParams = ChatCompletionUpdateParams; export interface ChatCompletionListParams extends CursorPageParams { /** - * A list of metadata keys to filter the chat completions by. Example: + * A list of metadata keys to filter the Chat Completions by. Example: * * `metadata[key1]=value1&metadata[key2]=value2` */ metadata?: Shared.Metadata | null; /** - * The model used to generate the chat completions. + * The model used to generate the Chat Completions. */ model?: string; /** - * Sort order for chat completions by timestamp. Use `asc` for ascending order or + * Sort order for Chat Completions by timestamp. Use `asc` for ascending order or * `desc` for descending order. Defaults to `asc`. 
*/ order?: 'asc' | 'desc'; @@ -1425,7 +1583,6 @@ export declare namespace Completions { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -1436,6 +1593,7 @@ export declare namespace Completions { type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts index 3691f41d8..994d6f880 100644 --- a/src/resources/chat/completions/index.ts +++ b/src/resources/chat/completions/index.ts @@ -24,7 +24,6 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts index fc1cc5d94..519a33aff 100644 --- a/src/resources/chat/completions/messages.ts +++ b/src/resources/chat/completions/messages.ts @@ -9,7 +9,7 @@ import { type CursorPageParams } from '../../../pagination'; export class Messages extends APIResource { /** - * Get the messages in a stored chat completion. 
Only chat completions that have + * Get the messages in a stored chat completion. Only Chat Completions that have * been created with the `store` parameter set to `true` will be returned. */ list( diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index a9b5b46fb..62ca758e0 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { Chat, type ChatModel } from './chat'; +export { Chat } from './chat'; export { ChatCompletionStoreMessagesPage, ChatCompletionsPage, @@ -25,7 +25,6 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, diff --git a/src/resources/files.ts b/src/resources/files.ts index f5f23dcad..723ac4cde 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -186,16 +186,12 @@ export interface FileObject { } /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ -export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision'; +export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision' | 'user_data' | 'evals'; export interface FileCreateParams { /** @@ -204,14 +200,10 @@ export interface FileCreateParams { file: Core.Uploadable; /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ purpose: FilePurpose; } diff --git a/src/resources/index.ts b/src/resources/index.ts index ad0302357..04c2c887b 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -60,4 +60,24 @@ export { type ModerationCreateResponse, type ModerationCreateParams, } from './moderations'; +export { Responses } from './responses/responses'; export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads/uploads'; +export { + VectorStoresPage, + VectorStoreSearchResponsesPage, + VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreSearchResponse, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, + type VectorStoreSearchParams, +} from './vector-stores/vector-stores'; diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts new file mode 100644 index 000000000..84f761a93 --- /dev/null +++ b/src/resources/responses/index.ts @@ -0,0 +1,9 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + ResponseItemListDataPage, + InputItems, + type ResponseItemList, + type InputItemListParams, +} from './input-items'; +export { Responses } from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts new file mode 100644 index 000000000..9704be89a --- /dev/null +++ b/src/resources/responses/input-items.ts @@ -0,0 +1,276 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as ResponsesAPI from './responses'; +import { CursorPage, type CursorPageParams } from '../../pagination'; + +export class InputItems extends APIResource { + /** + * Returns a list of input items for a given response. + */ + list( + responseId: string, + query?: InputItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + list( + responseId: string, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + list( + responseId: string, + query: InputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | 
ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + > { + if (isRequestOptions(query)) { + return this.list(responseId, {}, query); + } + return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemListDataPage, { + query, + ...options, + }); + } +} + +export class ResponseItemListDataPage extends CursorPage< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput +> {} + +/** + * A list of Response items. + */ +export interface ResponseItemList { + /** + * A list of items used to generate this response. + */ + data: Array< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + + /** + * The ID of the first item in the list. + */ + first_id: string; + + /** + * Whether there are more items available. + */ + has_more: boolean; + + /** + * The ID of the last item in the list. + */ + last_id: string; + + /** + * The type of object returned, must be `list`. + */ + object: 'list'; +} + +export namespace ResponseItemList { + export interface Message { + /** + * The unique ID of the message input. + */ + id: string; + + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponsesAPI.ResponseInputMessageContentList; + + /** + * The role of the message input. 
One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; + } + + export interface ComputerCallOutput { + /** + * The unique ID of the computer call tool output. + */ + id: string; + + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ComputerCallOutput.Output; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace ComputerCallOutput { + /** + * A computer screenshot image used with the computer use tool. + */ + export interface Output { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; + } + + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. 
+ */ + message: string; + } + } + + export interface FunctionCallOutput { + /** + * The unique ID of the function call tool output. + */ + id: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } +} + +export interface InputItemListParams extends CursorPageParams { + /** + * An item ID to list items before, used in pagination. + */ + before?: string; + + /** + * The order to return the input items in. Default is `asc`. + * + * - `asc`: Return the input items in ascending order. + * - `desc`: Return the input items in descending order. + */ + order?: 'asc' | 'desc'; +} + +InputItems.ResponseItemListDataPage = ResponseItemListDataPage; + +export declare namespace InputItems { + export { + type ResponseItemList as ResponseItemList, + ResponseItemListDataPage as ResponseItemListDataPage, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts new file mode 100644 index 000000000..2ad146873 --- /dev/null +++ b/src/resources/responses/responses.ts @@ -0,0 +1,2761 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { + type ExtractParsedContentFromParams, + parseResponse, + type ResponseCreateParamsWithTools, + addOutputText, +} from '../../lib/ResponsesParser'; +import * as Core from '../../core'; +import { APIPromise, isRequestOptions } from '../../core'; +import { APIResource } from '../../resource'; +import { Stream } from '../../streaming'; +import * as Shared from '../shared'; +import * as InputItemsAPI from './input-items'; +import { InputItemListParams, InputItems, ResponseItemList, ResponseItemListDataPage } from './input-items'; +import * as ResponsesAPI from './responses'; +import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream'; + +export interface ParsedResponseOutputText extends ResponseOutputText { + parsed: ParsedT | null; +} + +export type ParsedContent = ParsedResponseOutputText | ResponseOutputRefusal; + +export interface ParsedResponseOutputMessage extends ResponseOutputMessage { + content: ParsedContent[]; +} + +export interface ParsedResponseFunctionToolCall extends ResponseFunctionToolCall { + parsed_arguments: any; +} + +export type ParsedResponseOutputItem = + | ParsedResponseOutputMessage + | ParsedResponseFunctionToolCall + | ResponseFileSearchToolCall + | ResponseFunctionWebSearch + | ResponseComputerToolCall + | ResponseOutputItem.Reasoning; + +export interface ParsedResponse extends Response { + output: Array>; + + output_parsed: ParsedT | null; +} + +export type ResponseParseParams = ResponseCreateParamsNonStreaming; +export class Responses extends APIResource { + inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); + + /** + * Creates a model response. Provide + * [text](https://platform.openai.com/docs/guides/text) or + * [image](https://platform.openai.com/docs/guides/images) inputs to generate + * [text](https://platform.openai.com/docs/guides/text) or + * [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. 
Have + * the model call your own + * [custom code](https://platform.openai.com/docs/guides/function-calling) or use + * built-in [tools](https://platform.openai.com/docs/guides/tools) like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + * your own data as input for the model's response. + */ + create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; + create( + body: ResponseCreateParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + create( + body: ResponseCreateParamsBase, + options?: Core.RequestOptions, + ): APIPromise | Response>; + create( + body: ResponseCreateParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return ( + this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise> + )._thenUnwrap((rsp) => { + if ('type' in rsp && rsp.type === 'response') { + addOutputText(rsp as Response); + } + + return rsp; + }) as APIPromise | APIPromise>; + } + + /** + * Retrieves a model response with the given ID. + */ + retrieve( + responseId: string, + query?: ResponseRetrieveParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise; + retrieve( + responseId: string, + query: ResponseRetrieveParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.retrieve(responseId, {}, query); + } + return this._client.get(`/responses/${responseId}`, { query, ...options }); + } + + /** + * Deletes a model response with the given ID. 
+ */ + del(responseId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/responses/${responseId}`, { + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } + + parse>( + body: Params, + options?: Core.RequestOptions, + ): Core.APIPromise> { + return this._client.responses + .create(body, options) + ._thenUnwrap((response) => parseResponse(response as Response, body)); + } + + /** + * Creates a chat completion stream + */ + stream>( + body: Params, + options?: Core.RequestOptions, + ): ResponseStream { + return ResponseStream.createResponse(this._client, body, options); + } +} + +/** + * A tool that controls a virtual computer. Learn more about the + * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + */ +export interface ComputerTool { + /** + * The height of the computer display. + */ + display_height: number; + + /** + * The width of the computer display. + */ + display_width: number; + + /** + * The type of computer environment to control. + */ + environment: 'mac' | 'windows' | 'ubuntu' | 'browser'; + + /** + * The type of the computer use tool. Always `computer_use_preview`. + */ + type: 'computer-preview'; +} + +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ +export interface EasyInputMessage { + /** + * Text, image, or audio input to the model, used to generate a response. Can also + * contain previous assistant responses. + */ + content: string | ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. 
Always `message`. + */ + type?: 'message'; +} + +/** + * A tool that searches for relevant content from uploaded files. Learn more about + * the + * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + */ +export interface FileSearchTool { + /** + * The type of the file search tool. Always `file_search`. + */ + type: 'file_search'; + + /** + * The IDs of the vector stores to search. + */ + vector_store_ids: Array; + + /** + * A filter to apply based on file attributes. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. + */ + max_num_results?: number; + + /** + * Ranking options for search. + */ + ranking_options?: FileSearchTool.RankingOptions; +} + +export namespace FileSearchTool { + /** + * Ranking options for search. + */ + export interface RankingOptions { + /** + * The ranker to use for the file search. + */ + ranker?: 'auto' | 'default-2024-11-15'; + + /** + * The score threshold for the file search, a number between 0 and 1. Numbers + * closer to 1 will attempt to return only the most relevant results, but may + * return fewer results. + */ + score_threshold?: number; + } +} + +/** + * Defines a function in your own code the model can choose to call. Learn more + * about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ +export interface FunctionTool { + /** + * The name of the function to call. + */ + name: string; + + /** + * A JSON schema object describing the parameters of the function. + */ + parameters: Record; + + /** + * Whether to enforce strict parameter validation. Default `true`. + */ + strict: boolean; + + /** + * The type of the function tool. Always `function`. + */ + type: 'function'; + + /** + * A description of the function. Used by the model to determine whether or not to + * call the function. 
+ */ + description?: string | null; +} + +export interface Response { + /** + * Unique identifier for this Response. + */ + id: string; + + /** + * Unix timestamp (in seconds) of when this Response was created. + */ + created_at: number; + + output_text: string; + + /** + * An error object returned when the model fails to generate a Response. + */ + error: ResponseError | null; + + /** + * Details about why the response is incomplete. + */ + incomplete_details: Response.IncompleteDetails | null; + + /** + * Inserts a system (or developer) message as the first item in the model's + * context. + * + * When using along with `previous_response_id`, the instructions from a previous + * response will be not be carried over to the next response. This makes it simple + * to swap out system (or developer) messages in new responses. + */ + instructions: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. + */ + model: (string & {}) | Shared.ChatModel; + + /** + * The object type of this resource - always set to `response`. + */ + object: 'response'; + + /** + * An array of content items generated by the model. + * + * - The length and order of items in the `output` array is dependent on the + * model's response. 
+ * - Rather than accessing the first item in the `output` array and assuming it's + * an `assistant` message with the content generated by the model, you might + * consider using the `output_text` property where supported in SDKs. + */ + output: Array; + + /** + * Whether to allow the model to run tool calls in parallel. + */ + parallel_tool_calls: boolean; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature: number | null; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. 
So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p: number | null; + + /** + * An upper bound for the number of tokens that can be generated for a response, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_output_tokens?: number | null; + + /** + * The unique ID of the previous response to the model. Use this to create + * multi-turn conversations. Learn more about + * [conversation state](https://platform.openai.com/docs/guides/conversation-state). + */ + previous_response_id?: string | null; + + /** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ + reasoning?: Shared.Reasoning | null; + + /** + * The status of the response generation. One of `completed`, `failed`, + * `in_progress`, or `incomplete`. + */ + status?: ResponseStatus; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. 
+ */ + truncation?: 'auto' | 'disabled' | null; + + /** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ + usage?: ResponseUsage; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; +} + +export namespace Response { + /** + * Details about why the response is incomplete. + */ + export interface IncompleteDetails { + /** + * The reason why the response is incomplete. + */ + reason?: 'max_output_tokens' | 'content_filter'; + } +} + +/** + * Emitted when there is a partial audio response. + */ +export interface ResponseAudioDeltaEvent { + /** + * A chunk of Base64 encoded response audio bytes. + */ + delta: string; + + /** + * The type of the event. Always `response.audio.delta`. + */ + type: 'response.audio.delta'; +} + +/** + * Emitted when the audio response is complete. + */ +export interface ResponseAudioDoneEvent { + /** + * The type of the event. Always `response.audio.done`. + */ + type: 'response.audio.done'; +} + +/** + * Emitted when there is a partial transcript of audio. + */ +export interface ResponseAudioTranscriptDeltaEvent { + /** + * The partial transcript of the audio response. + */ + delta: string; + + /** + * The type of the event. Always `response.audio.transcript.delta`. + */ + type: 'response.audio.transcript.delta'; +} + +/** + * Emitted when the full audio transcript is completed. + */ +export interface ResponseAudioTranscriptDoneEvent { + /** + * The type of the event. Always `response.audio.transcript.done`. + */ + type: 'response.audio.transcript.done'; +} + +/** + * Emitted when a partial code snippet is added by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDeltaEvent { + /** + * The partial code snippet added by the code interpreter. 
+ */ + delta: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.delta`. + */ + type: 'response.code_interpreter_call.code.delta'; +} + +/** + * Emitted when code snippet output is finalized by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDoneEvent { + /** + * The final code snippet output by the code interpreter. + */ + code: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.done`. + */ + type: 'response.code_interpreter_call.code.done'; +} + +/** + * Emitted when the code interpreter call is completed. + */ +export interface ResponseCodeInterpreterCallCompletedEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.completed`. + */ + type: 'response.code_interpreter_call.completed'; +} + +/** + * Emitted when a code interpreter call is in progress. + */ +export interface ResponseCodeInterpreterCallInProgressEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.in_progress`. + */ + type: 'response.code_interpreter_call.in_progress'; +} + +/** + * Emitted when the code interpreter is actively interpreting the code snippet. + */ +export interface ResponseCodeInterpreterCallInterpretingEvent { + /** + * A tool call to run code. 
+ */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.interpreting`. + */ + type: 'response.code_interpreter_call.interpreting'; +} + +/** + * A tool call to run code. + */ +export interface ResponseCodeInterpreterToolCall { + /** + * The unique ID of the code interpreter tool call. + */ + id: string; + + /** + * The code to run. + */ + code: string; + + /** + * The results of the code interpreter tool call. + */ + results: Array; + + /** + * The status of the code interpreter tool call. + */ + status: 'in_progress' | 'interpreting' | 'completed'; + + /** + * The type of the code interpreter tool call. Always `code_interpreter_call`. + */ + type: 'code_interpreter_call'; +} + +export namespace ResponseCodeInterpreterToolCall { + /** + * The output of a code interpreter tool call that is text. + */ + export interface Logs { + /** + * The logs of the code interpreter tool call. + */ + logs: string; + + /** + * The type of the code interpreter text output. Always `logs`. + */ + type: 'logs'; + } + + /** + * The output of a code interpreter tool call that is a file. + */ + export interface Files { + files: Array; + + /** + * The type of the code interpreter file output. Always `files`. + */ + type: 'files'; + } + + export namespace Files { + export interface File { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The MIME type of the file. + */ + mime_type: string; + } + } +} + +/** + * Emitted when the model response is complete. + */ +export interface ResponseCompletedEvent { + /** + * Properties of the completed response. + */ + response: Response; + + /** + * The type of the event. Always `response.completed`. + */ + type: 'response.completed'; +} + +/** + * A tool call to a computer use tool. 
See the + * [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + * for more information. + */ +export interface ResponseComputerToolCall { + /** + * The unique ID of the computer call. + */ + id: string; + + /** + * A click action. + */ + action: + | ResponseComputerToolCall.Click + | ResponseComputerToolCall.DoubleClick + | ResponseComputerToolCall.Drag + | ResponseComputerToolCall.Keypress + | ResponseComputerToolCall.Move + | ResponseComputerToolCall.Screenshot + | ResponseComputerToolCall.Scroll + | ResponseComputerToolCall.Type + | ResponseComputerToolCall.Wait; + + /** + * An identifier used when responding to the tool call with output. + */ + call_id: string; + + /** + * The pending safety checks for the computer call. + */ + pending_safety_checks: Array; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the computer call. Always `computer_call`. + */ + type: 'computer_call'; +} + +export namespace ResponseComputerToolCall { + /** + * A click action. + */ + export interface Click { + /** + * Indicates which mouse button was pressed during the click. One of `left`, + * `right`, `wheel`, `back`, or `forward`. + */ + button: 'left' | 'right' | 'wheel' | 'back' | 'forward'; + + /** + * Specifies the event type. For a click action, this property is always set to + * `click`. + */ + type: 'click'; + + /** + * The x-coordinate where the click occurred. + */ + x: number; + + /** + * The y-coordinate where the click occurred. + */ + y: number; + } + + /** + * A double click action. + */ + export interface DoubleClick { + /** + * Specifies the event type. For a double click action, this property is always set + * to `double_click`. + */ + type: 'double_click'; + + /** + * The x-coordinate where the double click occurred. 
+ */ + x: number; + + /** + * The y-coordinate where the double click occurred. + */ + y: number; + } + + /** + * A drag action. + */ + export interface Drag { + /** + * An array of coordinates representing the path of the drag action. Coordinates + * will appear as an array of objects, eg + * + * ``` + * [ + * { x: 100, y: 200 }, + * { x: 200, y: 300 } + * ] + * ``` + */ + path: Array; + + /** + * Specifies the event type. For a drag action, this property is always set to + * `drag`. + */ + type: 'drag'; + } + + export namespace Drag { + /** + * A series of x/y coordinate pairs in the drag path. + */ + export interface Path { + /** + * The x-coordinate. + */ + x: number; + + /** + * The y-coordinate. + */ + y: number; + } + } + + /** + * A collection of keypresses the model would like to perform. + */ + export interface Keypress { + /** + * The combination of keys the model is requesting to be pressed. This is an array + * of strings, each representing a key. + */ + keys: Array; + + /** + * Specifies the event type. For a keypress action, this property is always set to + * `keypress`. + */ + type: 'keypress'; + } + + /** + * A mouse move action. + */ + export interface Move { + /** + * Specifies the event type. For a move action, this property is always set to + * `move`. + */ + type: 'move'; + + /** + * The x-coordinate to move to. + */ + x: number; + + /** + * The y-coordinate to move to. + */ + y: number; + } + + /** + * A screenshot action. + */ + export interface Screenshot { + /** + * Specifies the event type. For a screenshot action, this property is always set + * to `screenshot`. + */ + type: 'screenshot'; + } + + /** + * A scroll action. + */ + export interface Scroll { + /** + * The horizontal scroll distance. + */ + scroll_x: number; + + /** + * The vertical scroll distance. + */ + scroll_y: number; + + /** + * Specifies the event type. For a scroll action, this property is always set to + * `scroll`. 
+ */ + type: 'scroll'; + + /** + * The x-coordinate where the scroll occurred. + */ + x: number; + + /** + * The y-coordinate where the scroll occurred. + */ + y: number; + } + + /** + * An action to type in text. + */ + export interface Type { + /** + * The text to type. + */ + text: string; + + /** + * Specifies the event type. For a type action, this property is always set to + * `type`. + */ + type: 'type'; + } + + /** + * A wait action. + */ + export interface Wait { + /** + * Specifies the event type. For a wait action, this property is always set to + * `wait`. + */ + type: 'wait'; + } + + /** + * A pending safety check for the computer call. + */ + export interface PendingSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +/** + * Multi-modal input and output contents. + */ +export type ResponseContent = + | ResponseInputText + | ResponseInputImage + | ResponseInputFile + | ResponseOutputText + | ResponseOutputRefusal; + +/** + * Emitted when a new content part is added. + */ +export interface ResponseContentPartAddedEvent { + /** + * The index of the content part that was added. + */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that was added. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.added`. + */ + type: 'response.content_part.added'; +} + +/** + * Emitted when a content part is done. + */ +export interface ResponseContentPartDoneEvent { + /** + * The index of the content part that is done. 
+ */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that is done. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.done`. + */ + type: 'response.content_part.done'; +} + +/** + * An event that is emitted when a response is created. + */ +export interface ResponseCreatedEvent { + /** + * The response that was created. + */ + response: Response; + + /** + * The type of the event. Always `response.created`. + */ + type: 'response.created'; +} + +/** + * An error object returned when the model fails to generate a Response. + */ +export interface ResponseError { + /** + * The error code for the response. + */ + code: + | 'server_error' + | 'rate_limit_exceeded' + | 'invalid_prompt' + | 'vector_store_timeout' + | 'invalid_image' + | 'invalid_image_format' + | 'invalid_base64_image' + | 'invalid_image_url' + | 'image_too_large' + | 'image_too_small' + | 'image_parse_error' + | 'image_content_policy_violation' + | 'invalid_image_mode' + | 'image_file_too_large' + | 'unsupported_image_media_type' + | 'empty_image_file' + | 'failed_to_download_image' + | 'image_file_not_found'; + + /** + * A human-readable description of the error. + */ + message: string; +} + +/** + * Emitted when an error occurs. + */ +export interface ResponseErrorEvent { + /** + * The error code. + */ + code: string | null; + + /** + * The error message. + */ + message: string; + + /** + * The error parameter. + */ + param: string | null; + + /** + * The type of the event. Always `error`. + */ + type: 'error'; +} + +/** + * An event that is emitted when a response fails. + */ +export interface ResponseFailedEvent { + /** + * The response that failed. + */ + response: Response; + + /** + * The type of the event. 
Always `response.failed`. + */ + type: 'response.failed'; +} + +/** + * Emitted when a file search call is completed (results found). + */ +export interface ResponseFileSearchCallCompletedEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is initiated. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.completed`. + */ + type: 'response.file_search_call.completed'; +} + +/** + * Emitted when a file search call is initiated. + */ +export interface ResponseFileSearchCallInProgressEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is initiated. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.in_progress`. + */ + type: 'response.file_search_call.in_progress'; +} + +/** + * Emitted when a file search is currently searching. + */ +export interface ResponseFileSearchCallSearchingEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is searching. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.searching`. + */ + type: 'response.file_search_call.searching'; +} + +/** + * The results of a file search tool call. See the + * [file search guide](https://platform.openai.com/docs/guides/tools-file-search) + * for more information. + */ +export interface ResponseFileSearchToolCall { + /** + * The unique ID of the file search tool call. + */ + id: string; + + /** + * The queries used to search for files. + */ + queries: Array; + + /** + * The status of the file search tool call. 
One of `in_progress`, `searching`, + * `incomplete` or `failed`, + */ + status: 'in_progress' | 'searching' | 'completed' | 'incomplete' | 'failed'; + + /** + * The type of the file search tool call. Always `file_search_call`. + */ + type: 'file_search_call'; + + /** + * The results of the file search tool call. + */ + results?: Array | null; +} + +export namespace ResponseFileSearchToolCall { + export interface Result { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + + /** + * The unique ID of the file. + */ + file_id?: string; + + /** + * The name of the file. + */ + filename?: string; + + /** + * The relevance score of the file - a value between 0 and 1. + */ + score?: number; + + /** + * The text that was retrieved from the file. + */ + text?: string; + } +} + +/** + * An object specifying the format that the model must output. + * + * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + * ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * The default format is `{ "type": "text" }` with no additional options. + * + * **Not recommended for gpt-4o and newer models:** + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. + */ +export type ResponseFormatTextConfig = + | Shared.ResponseFormatText + | ResponseFormatTextJSONSchemaConfig + | Shared.ResponseFormatJSONObject; + +/** + * JSON Schema response format. 
Used to generate structured JSON responses. Learn + * more about + * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + */ +export interface ResponseFormatTextJSONSchemaConfig { + /** + * The schema for the response format, described as a JSON Schema object. Learn how + * to build JSON schemas [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of response format being defined. Always `json_schema`. + */ + type: 'json_schema'; + + /** + * A description of what the response format is for, used by the model to determine + * how to respond in the format. + */ + description?: string; + + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name?: string; + + /** + * Whether to enable strict schema adherence when generating the output. If set to + * true, the model will always follow the exact schema defined in the `schema` + * field. Only a subset of JSON Schema is supported when `strict` is `true`. To + * learn more, read the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + */ + strict?: boolean | null; +} + +/** + * Emitted when there is a partial function-call arguments delta. + */ +export interface ResponseFunctionCallArgumentsDeltaEvent { + /** + * The function-call arguments delta that is added. + */ + delta: string; + + /** + * The ID of the output item that the function-call arguments delta is added to. + */ + item_id: string; + + /** + * The index of the output item that the function-call arguments delta is added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.function_call_arguments.delta`. + */ + type: 'response.function_call_arguments.delta'; +} + +/** + * Emitted when function-call arguments are finalized. + */ +export interface ResponseFunctionCallArgumentsDoneEvent { + /** + * The function-call arguments. 
+ */ + arguments: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item. + */ + output_index: number; + + type: 'response.function_call_arguments.done'; +} + +/** + * A tool call to run a function. See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCall { + /** + * The unique ID of the function tool call. + */ + id: string; + + /** + * A JSON string of the arguments to pass to the function. + */ + arguments: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * The name of the function to run. + */ + name: string; + + /** + * The type of the function tool call. Always `function_call`. + */ + type: 'function_call'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +/** + * The results of a web search tool call. See the + * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + * more information. + */ +export interface ResponseFunctionWebSearch { + /** + * The unique ID of the web search tool call. + */ + id: string; + + /** + * The status of the web search tool call. + */ + status: 'in_progress' | 'searching' | 'completed' | 'failed'; + + /** + * The type of the web search tool call. Always `web_search_call`. + */ + type: 'web_search_call'; +} + +/** + * Emitted when the response is in progress. + */ +export interface ResponseInProgressEvent { + /** + * The response that is in progress. + */ + response: Response; + + /** + * The type of the event. Always `response.in_progress`. + */ + type: 'response.in_progress'; +} + +/** + * Specify additional output data to include in the model response. 
Currently + * supported values are: + * + * - `file_search_call.results`: Include the search results of the file search tool + * call. + * - `message.input_image.image_url`: Include image urls from the input message. + * - `computer_call_output.output.image_url`: Include image urls from the computer + * call output. + */ +export type ResponseIncludable = + | 'file_search_call.results' + | 'message.input_image.image_url' + | 'computer_call_output.output.image_url'; + +/** + * An event that is emitted when a response finishes as incomplete. + */ +export interface ResponseIncompleteEvent { + /** + * The response that was incomplete. + */ + response: Response; + + /** + * The type of the event. Always `response.incomplete`. + */ + type: 'response.incomplete'; +} + +/** + * A list of one or many input items to the model, containing different content + * types. + */ +export type ResponseInput = Array; + +/** + * An audio input to the model. + */ +export interface ResponseInputAudio { + /** + * Base64-encoded audio data. + */ + data: string; + + /** + * The format of the audio data. Currently supported formats are `mp3` and `wav`. + */ + format: 'mp3' | 'wav'; + + /** + * The type of the input item. Always `input_audio`. + */ + type: 'input_audio'; +} + +/** + * A text input to the model. + */ +export type ResponseInputContent = ResponseInputText | ResponseInputImage | ResponseInputFile; + +/** + * A file input to the model. + */ +export interface ResponseInputFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The content of the file to be sent to the model. + */ + file_data?: string; + + /** + * The ID of the file to be sent to the model. + */ + file_id?: string; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; +} + +/** + * An image input to the model. Learn about + * [image inputs](https://platform.openai.com/docs/guides/vision). 
+ */ +export interface ResponseInputImage { + /** + * The detail level of the image to be sent to the model. One of `high`, `low`, or + * `auto`. Defaults to `auto`. + */ + detail: 'high' | 'low' | 'auto'; + + /** + * The type of the input item. Always `input_image`. + */ + type: 'input_image'; + + /** + * The ID of the file to be sent to the model. + */ + file_id?: string | null; + + /** + * The URL of the image to be sent to the model. A fully qualified URL or base64 + * encoded image in a data URL. + */ + image_url?: string | null; +} + +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ +export type ResponseInputItem = + | EasyInputMessage + | ResponseInputItem.Message + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseComputerToolCall + | ResponseInputItem.ComputerCallOutput + | ResponseFunctionWebSearch + | ResponseFunctionToolCall + | ResponseInputItem.FunctionCallOutput + | ResponseInputItem.Reasoning + | ResponseInputItem.ItemReference; + +export namespace ResponseInputItem { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. + */ + export interface Message { + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponsesAPI.ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. 
+ */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; + } + + /** + * The output of a computer tool call. + */ + export interface ComputerCallOutput { + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ComputerCallOutput.Output; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The ID of the computer tool call output. + */ + id?: string; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace ComputerCallOutput { + /** + * A computer screenshot image used with the computer use tool. + */ + export interface Output { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; + } + + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } + } + + /** + * The output of a function tool call. + */ + export interface FunctionCallOutput { + /** + * The unique ID of the function tool call generated by the model. 
+ */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The unique ID of the function tool call output. Populated when this item is + * returned via API. + */ + id?: string; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + /** + * A description of the chain of thought used by a reasoning model while generating + * a response. + */ + export interface Reasoning { + /** + * The unique identifier of the reasoning content. + */ + id: string; + + /** + * Reasoning text contents. + */ + content: Array; + + /** + * The type of the object. Always `reasoning`. + */ + type: 'reasoning'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace Reasoning { + export interface Content { + /** + * A short summary of the reasoning used by the model when generating the response. + */ + text: string; + + /** + * The type of the object. Always `text`. + */ + type: 'reasoning_summary'; + } + } + + /** + * An internal identifier for an item to reference. + */ + export interface ItemReference { + /** + * The ID of the item to reference. + */ + id: string; + + /** + * The type of item to reference. Always `item_reference`. + */ + type: 'item_reference'; + } +} + +/** + * A list of one or many input items to the model, containing different content + * types. + */ +export type ResponseInputMessageContentList = Array; + +/** + * A text input to the model. + */ +export interface ResponseInputText { + /** + * The text input to the model. 
+ */ + text: string; + + /** + * The type of the input item. Always `input_text`. + */ + type: 'input_text'; +} + +/** + * An audio output from the model. + */ +export interface ResponseOutputAudio { + /** + * Base64-encoded audio data from the model. + */ + data: string; + + /** + * The transcript of the audio data from the model. + */ + transcript: string; + + /** + * The type of the output audio. Always `output_audio`. + */ + type: 'output_audio'; +} + +/** + * An output message from the model. + */ +export type ResponseOutputItem = + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseFunctionToolCall + | ResponseFunctionWebSearch + | ResponseComputerToolCall + | ResponseOutputItem.Reasoning; + +export namespace ResponseOutputItem { + /** + * A description of the chain of thought used by a reasoning model while generating + * a response. + */ + export interface Reasoning { + /** + * The unique identifier of the reasoning content. + */ + id: string; + + /** + * Reasoning text contents. + */ + content: Array; + + /** + * The type of the object. Always `reasoning`. + */ + type: 'reasoning'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace Reasoning { + export interface Content { + /** + * A short summary of the reasoning used by the model when generating the response. + */ + text: string; + + /** + * The type of the object. Always `text`. + */ + type: 'reasoning_summary'; + } + } +} + +/** + * Emitted when a new output item is added. + */ +export interface ResponseOutputItemAddedEvent { + /** + * The output item that was added. + */ + item: ResponseOutputItem; + + /** + * The index of the output item that was added. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_item.added`. 
+ */ + type: 'response.output_item.added'; +} + +/** + * Emitted when an output item is marked done. + */ +export interface ResponseOutputItemDoneEvent { + /** + * The output item that was marked done. + */ + item: ResponseOutputItem; + + /** + * The index of the output item that was marked done. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_item.done`. + */ + type: 'response.output_item.done'; +} + +/** + * An output message from the model. + */ +export interface ResponseOutputMessage { + /** + * The unique ID of the output message. + */ + id: string; + + /** + * The content of the output message. + */ + content: Array; + + /** + * The role of the output message. Always `assistant`. + */ + role: 'assistant'; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the output message. Always `message`. + */ + type: 'message'; +} + +/** + * A refusal from the model. + */ +export interface ResponseOutputRefusal { + /** + * The refusal explanationfrom the model. + */ + refusal: string; + + /** + * The type of the refusal. Always `refusal`. + */ + type: 'refusal'; +} + +/** + * A text output from the model. + */ +export interface ResponseOutputText { + /** + * The annotations of the text output. + */ + annotations: Array< + ResponseOutputText.FileCitation | ResponseOutputText.URLCitation | ResponseOutputText.FilePath + >; + + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; +} + +export namespace ResponseOutputText { + /** + * A citation to a file. + */ + export interface FileCitation { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. 
+ */ + index: number; + + /** + * The type of the file citation. Always `file_citation`. + */ + type: 'file_citation'; + } + + /** + * A citation for a web resource used to generate a model response. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * The URL of the web resource. + */ + url: string; + } + + /** + * A path to a file. + */ + export interface FilePath { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file path. Always `file_path`. + */ + type: 'file_path'; + } +} + +/** + * Emitted when there is a partial refusal text. + */ +export interface ResponseRefusalDeltaEvent { + /** + * The index of the content part that the refusal text is added to. + */ + content_index: number; + + /** + * The refusal text that is added. + */ + delta: string; + + /** + * The ID of the output item that the refusal text is added to. + */ + item_id: string; + + /** + * The index of the output item that the refusal text is added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.refusal.delta`. + */ + type: 'response.refusal.delta'; +} + +/** + * Emitted when refusal text is finalized. + */ +export interface ResponseRefusalDoneEvent { + /** + * The index of the content part that the refusal text is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the refusal text is finalized. + */ + item_id: string; + + /** + * The index of the output item that the refusal text is finalized. 
+ */ + output_index: number; + + /** + * The refusal text that is finalized. + */ + refusal: string; + + /** + * The type of the event. Always `response.refusal.done`. + */ + type: 'response.refusal.done'; +} + +/** + * The status of the response generation. One of `completed`, `failed`, + * `in_progress`, or `incomplete`. + */ +export type ResponseStatus = 'completed' | 'failed' | 'in_progress' | 'incomplete'; + +/** + * Emitted when there is a partial audio response. + */ +export type ResponseStreamEvent = + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; + +/** + * Emitted when a text annotation is added. + */ +export interface ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. 
+ */ + annotation: + | ResponseTextAnnotationDeltaEvent.FileCitation + | ResponseTextAnnotationDeltaEvent.URLCitation + | ResponseTextAnnotationDeltaEvent.FilePath; + + /** + * The index of the annotation that was added. + */ + annotation_index: number; + + /** + * The index of the content part that the text annotation was added to. + */ + content_index: number; + + /** + * The ID of the output item that the text annotation was added to. + */ + item_id: string; + + /** + * The index of the output item that the text annotation was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.annotation.added`. + */ + type: 'response.output_text.annotation.added'; +} + +export namespace ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. + */ + export interface FileCitation { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file citation. Always `file_citation`. + */ + type: 'file_citation'; + } + + /** + * A citation for a web resource used to generate a model response. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * The URL of the web resource. + */ + url: string; + } + + /** + * A path to a file. + */ + export interface FilePath { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file path. Always `file_path`. + */ + type: 'file_path'; + } +} + +/** + * Configuration options for a text response from the model. 
Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ +export interface ResponseTextConfig { + /** + * An object specifying the format that the model must output. + * + * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + * ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * The default format is `{ "type": "text" }` with no additional options. + * + * **Not recommended for gpt-4o and newer models:** + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. + */ + format?: ResponseFormatTextConfig; +} + +/** + * Emitted when there is an additional text delta. + */ +export interface ResponseTextDeltaEvent { + /** + * The index of the content part that the text delta was added to. + */ + content_index: number; + + /** + * The text delta that was added. + */ + delta: string; + + /** + * The ID of the output item that the text delta was added to. + */ + item_id: string; + + /** + * The index of the output item that the text delta was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.delta`. + */ + type: 'response.output_text.delta'; +} + +/** + * Emitted when text content is finalized. + */ +export interface ResponseTextDoneEvent { + /** + * The index of the content part that the text content is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the text content is finalized. + */ + item_id: string; + + /** + * The index of the output item that the text content is finalized. 
+ */ + output_index: number; + + /** + * The text content that is finalized. + */ + text: string; + + /** + * The type of the event. Always `response.output_text.done`. + */ + type: 'response.output_text.done'; +} + +/** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ +export interface ResponseUsage { + /** + * The number of input tokens. + */ + input_tokens: number; + + /** + * The number of output tokens. + */ + output_tokens: number; + + /** + * A detailed breakdown of the output tokens. + */ + output_tokens_details: ResponseUsage.OutputTokensDetails; + + /** + * The total number of tokens used. + */ + total_tokens: number; +} + +export namespace ResponseUsage { + /** + * A detailed breakdown of the output tokens. + */ + export interface OutputTokensDetails { + /** + * The number of reasoning tokens. + */ + reasoning_tokens: number; + } +} + +/** + * Emitted when a web search call is completed. + */ +export interface ResponseWebSearchCallCompletedEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.completed`. + */ + type: 'response.web_search_call.completed'; +} + +/** + * Emitted when a web search call is initiated. + */ +export interface ResponseWebSearchCallInProgressEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.in_progress`. + */ + type: 'response.web_search_call.in_progress'; +} + +/** + * Emitted when a web search call is executing. 
+ */ +export interface ResponseWebSearchCallSearchingEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.searching`. + */ + type: 'response.web_search_call.searching'; +} + +/** + * A tool that searches for relevant content from uploaded files. Learn more about + * the + * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + */ +export type Tool = FileSearchTool | FunctionTool | ComputerTool | WebSearchTool; + +/** + * Use this option to force the model to call a specific function. + */ +export interface ToolChoiceFunction { + /** + * The name of the function to call. + */ + name: string; + + /** + * For function calling, the type is always `function`. + */ + type: 'function'; +} + +/** + * Controls which (if any) tool is called by the model. + * + * `none` means the model will not call any tool and instead generates a message. + * + * `auto` means the model can pick between generating a message or calling one or + * more tools. + * + * `required` means the model must call one or more tools. + */ +export type ToolChoiceOptions = 'none' | 'auto' | 'required'; + +/** + * Indicates that the model should use a built-in tool to generate a response. + * [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). + */ +export interface ToolChoiceTypes { + /** + * The type of hosted tool the model should to use. Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). 
+ * + * Allowed values are: + * + * - `file_search` + * - `web_search_preview` + * - `computer_use_preview` + */ + type: 'file_search' | 'web_search_preview' | 'computer_use_preview' | 'web_search_preview_2025_03_11'; +} + +/** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + */ +export interface WebSearchTool { + /** + * The type of the web search tool. One of: + * + * - `web_search_preview` + * - `web_search_preview_2025_03_11` + */ + type: 'web_search_preview' | 'web_search_preview_2025_03_11'; + + /** + * High level guidance for the amount of context window space to use for the + * search. One of `low`, `medium`, or `high`. `medium` is the default. + */ + search_context_size?: 'low' | 'medium' | 'high'; + + user_location?: WebSearchTool.UserLocation | null; +} + +export namespace WebSearchTool { + export interface UserLocation { + /** + * The type of location approximation. Always `approximate`. + */ + type: 'approximate'; + + /** + * Free text input for the city of the user, e.g. `San Francisco`. + */ + city?: string; + + /** + * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + * the user, e.g. `US`. + */ + country?: string; + + /** + * Free text input for the region of the user, e.g. `California`. + */ + region?: string; + + /** + * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + * user, e.g. `America/Los_Angeles`. + */ + timezone?: string; + } +} + +export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming; + +export interface ResponseCreateParamsBase { + /** + * Text, image, or file inputs to the model, used to generate a response. 
+ * + * Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Image inputs](https://platform.openai.com/docs/guides/images) + * - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + * - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + * - [Function calling](https://platform.openai.com/docs/guides/function-calling) + */ + input: string | ResponseInput; + + /** + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. + */ + model: (string & {}) | Shared.ChatModel; + + /** + * Specify additional output data to include in the model response. Currently + * supported values are: + * + * - `file_search_call.results`: Include the search results of the file search tool + * call. + * - `message.input_image.image_url`: Include image urls from the input message. + * - `computer_call_output.output.image_url`: Include image urls from the computer + * call output. + */ + include?: Array | null; + + /** + * Inserts a system (or developer) message as the first item in the model's + * context. + * + * When using along with `previous_response_id`, the instructions from a previous + * response will be not be carried over to the next response. This makes it simple + * to swap out system (or developer) messages in new responses. + */ + instructions?: string | null; + + /** + * An upper bound for the number of tokens that can be generated for a response, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_output_tokens?: number | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * Whether to allow the model to run tool calls in parallel. + */ + parallel_tool_calls?: boolean | null; + + /** + * The unique ID of the previous response to the model. Use this to create + * multi-turn conversations. Learn more about + * [conversation state](https://platform.openai.com/docs/guides/conversation-state). + */ + previous_response_id?: string | null; + + /** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ + reasoning?: Shared.Reasoning | null; + + /** + * Whether to store the generated model response for later retrieval via API. + */ + store?: boolean | null; + + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream?: boolean | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature?: number | null; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. 
Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice?: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools?: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. 
+ * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. + */ + truncation?: 'auto' | 'disabled' | null; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; +} + +export namespace ResponseCreateParams { + export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming; + export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming; +} + +export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream?: false | null; +} + +export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream: true; +} + +export interface ResponseRetrieveParams { + /** + * Additional fields to include in the response. See the `include` parameter for + * Response creation above for more information. 
+ */ + include?: Array; +} + +Responses.InputItems = InputItems; +Responses.ResponseItemListDataPage = ResponseItemListDataPage; + +export declare namespace Responses { + export { + InputItems as InputItems, + type ResponseItemList as ResponseItemList, + ResponseItemListDataPage as ResponseItemListDataPage, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 3bb11582f..86b2d2dee 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,96 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +export type ChatModel = + | 'o3-mini' + | 'o3-mini-2025-01-31' + | 'o1' + | 'o1-2024-12-17' + | 'o1-preview' + | 'o1-preview-2024-09-12' + | 'o1-mini' + | 'o1-mini-2024-09-12' + | 'computer-use-preview' + | 'computer-use-preview-2025-02-04' + | 'computer-use-preview-2025-03-11' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' + | 'gpt-4o' + | 'gpt-4o-2024-11-20' + | 'gpt-4o-2024-08-06' + | 'gpt-4o-2024-05-13' + | 'gpt-4o-audio-preview' + | 'gpt-4o-audio-preview-2024-10-01' + | 'gpt-4o-audio-preview-2024-12-17' + | 'gpt-4o-mini-audio-preview' + | 'gpt-4o-mini-audio-preview-2024-12-17' + | 'chatgpt-4o-latest' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0301' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; + +/** + * A filter used to compare a specified attribute key to a given value using a + * defined comparison operation. + */ +export interface ComparisonFilter { + /** + * The key to compare against the value. 
+ */ + key: string; + + /** + * Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + * + * - `eq`: equals + * - `ne`: not equal + * - `gt`: greater than + * - `gte`: greater than or equal + * - `lt`: less than + * - `lte`: less than or equal + */ + type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte'; + + /** + * The value to compare against the attribute key; supports string, number, or + * boolean types. + */ + value: string | number | boolean; +} + +/** + * Combine multiple filters using `and` or `or`. + */ +export interface CompoundFilter { + /** + * Array of filters to combine. Items can be `ComparisonFilter` or + * `CompoundFilter`. + */ + filters: Array; + + /** + * Type of operation: `and` or `or`. + */ + type: 'and' | 'or'; +} + export interface ErrorObject { code: string | null; @@ -65,23 +156,76 @@ export type FunctionParameters = Record; */ export type Metadata = Record; +/** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ +export interface Reasoning { + /** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + effort: ReasoningEffort | null; + + /** + * **o-series models only** + * + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `concise` or + * `detailed`. + */ + generate_summary?: 'concise' | 'detailed' | null; +} + +/** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. 
Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ +export type ReasoningEffort = 'low' | 'medium' | 'high' | null; + +/** + * JSON object response format. An older method of generating JSON responses. Using + * `json_schema` is recommended for models that support it. Note that the model + * will not generate JSON without a system or user message instructing it to do so. + */ export interface ResponseFormatJSONObject { /** - * The type of response format being defined: `json_object` + * The type of response format being defined. Always `json_object`. */ type: 'json_object'; } +/** + * JSON Schema response format. Used to generate structured JSON responses. Learn + * more about + * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + */ export interface ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ json_schema: ResponseFormatJSONSchema.JSONSchema; /** - * The type of response format being defined: `json_schema` + * The type of response format being defined. Always `json_schema`. */ type: 'json_schema'; } export namespace ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ export interface JSONSchema { /** * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores @@ -96,7 +240,8 @@ export namespace ResponseFormatJSONSchema { description?: string; /** - * The schema for the response format, described as a JSON Schema object. + * The schema for the response format, described as a JSON Schema object. Learn how + * to build JSON schemas [here](https://json-schema.org/). */ schema?: Record; @@ -111,9 +256,12 @@ export namespace ResponseFormatJSONSchema { } } +/** + * Default response format. Used to generate text responses. 
+ */ export interface ResponseFormatText { /** - * The type of response format being defined: `text` + * The type of response format being defined. Always `text`. */ type: 'text'; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index f977e18f6..9e046b48d 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -22,10 +22,9 @@ export class Uploads extends APIResource { * contains all the parts you uploaded. This File is usable in the rest of our * platform as a regular File object. * - * For certain `purpose`s, the correct `mime_type` must be specified. Please refer - * to documentation for the supported MIME types for your use case: - * - * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) + * For certain `purpose` values, the correct `mime_type` must be specified. Please + * refer to documentation for the + * [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). * * For guidance on the proper filename extensions for each purpose, please follow * the documentation on diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts similarity index 92% rename from src/resources/beta/vector-stores/file-batches.ts rename to src/resources/vector-stores/file-batches.ts index 2c47cb9c2..9be1d81a3 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -1,15 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../../resource'; -import { isRequestOptions } from '../../../core'; -import { sleep } from '../../../core'; -import { Uploadable } from '../../../core'; -import { allSettledWithThrow } from '../../../lib/Util'; -import * as Core from '../../../core'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import { sleep } from '../../core'; +import { Uploadable } from '../../core'; +import { allSettledWithThrow } from '../../lib/Util'; +import * as Core from '../../core'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; import * as VectorStoresAPI from './vector-stores'; -import { type CursorPageParams } from '../../../pagination'; +import { type CursorPageParams } from '../../pagination'; export class FileBatches extends APIResource { /** @@ -265,6 +265,15 @@ export interface FileBatchCreateParams { */ file_ids: Array; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/vector-stores/files.ts similarity index 74% rename from src/resources/beta/vector-stores/files.ts rename to src/resources/vector-stores/files.ts index 1fda9a99b..28caf9781 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../../resource'; -import { sleep, Uploadable, isRequestOptions } from '../../../core'; -import * as Core from '../../../core'; +import { APIResource } from '../../resource'; +import { sleep, Uploadable, isRequestOptions } from '../../core'; +import * as Core from '../../core'; import * as VectorStoresAPI from './vector-stores'; -import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { CursorPage, type CursorPageParams, Page } from '../../pagination'; export class Files extends APIResource { /** @@ -38,6 +38,22 @@ export class Files extends APIResource { }); } + /** + * Update attributes on a vector store file. + */ + update( + vectorStoreId: string, + fileId: string, + body: FileUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/vector_stores/${vectorStoreId}/files/${fileId}`, { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + /** * Returns a list of vector store files. */ @@ -167,10 +183,30 @@ export class Files extends APIResource { const fileInfo = await this.upload(vectorStoreId, file, options); return await this.poll(vectorStoreId, fileInfo.id, options); } + + /** + * Retrieve the parsed contents of a vector store file. + */ + content( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.PagePromise { + return this._client.getAPIList( + `/vector_stores/${vectorStoreId}/files/${fileId}/content`, + FileContentResponsesPage, + { ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers } }, + ); + } } export class VectorStoreFilesPage extends CursorPage {} +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class FileContentResponsesPage extends Page {} + /** * A list of files attached to a vector store. 
*/ @@ -217,6 +253,15 @@ export interface VectorStoreFile { */ vector_store_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The strategy used to chunk the file. */ @@ -249,6 +294,18 @@ export interface VectorStoreFileDeleted { object: 'vector_store.file.deleted'; } +export interface FileContentResponse { + /** + * The text content + */ + text?: string; + + /** + * The content type (currently only `"text"`) + */ + type?: string; +} + export interface FileCreateParams { /** * A [File](https://platform.openai.com/docs/api-reference/files) ID that the @@ -257,6 +314,15 @@ export interface FileCreateParams { */ file_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. @@ -264,6 +330,17 @@ export interface FileCreateParams { chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; } +export interface FileUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. 
Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes: Record | null; +} + export interface FileListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place @@ -286,13 +363,17 @@ export interface FileListParams extends CursorPageParams { } Files.VectorStoreFilesPage = VectorStoreFilesPage; +Files.FileContentResponsesPage = FileContentResponsesPage; export declare namespace Files { export { type VectorStoreFile as VectorStoreFile, type VectorStoreFileDeleted as VectorStoreFileDeleted, + type FileContentResponse as FileContentResponse, VectorStoreFilesPage as VectorStoreFilesPage, + FileContentResponsesPage as FileContentResponsesPage, type FileCreateParams as FileCreateParams, + type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, }; } diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/vector-stores/index.ts similarity index 82% rename from src/resources/beta/vector-stores/index.ts rename to src/resources/vector-stores/index.ts index d587bd160..9cbcbc0b2 100644 --- a/src/resources/beta/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -8,14 +8,18 @@ export { } from './file-batches'; export { VectorStoreFilesPage, + FileContentResponsesPage, Files, type VectorStoreFile, type VectorStoreFileDeleted, + type FileContentResponse, type FileCreateParams, + type FileUpdateParams, type FileListParams, } from './files'; export { VectorStoresPage, + VectorStoreSearchResponsesPage, VectorStores, type AutoFileChunkingStrategyParam, type FileChunkingStrategy, @@ -26,7 +30,9 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, + type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, type VectorStoreListParams, + type VectorStoreSearchParams, } from 
'./vector-stores'; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts similarity index 77% rename from src/resources/beta/vector-stores/vector-stores.ts rename to src/resources/vector-stores/vector-stores.ts index 8438b79da..7d61e7fd6 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -1,9 +1,9 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { isRequestOptions } from '../../../core'; -import * as Core from '../../../core'; -import * as Shared from '../../shared'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as Shared from '../shared'; import * as FileBatchesAPI from './file-batches'; import { FileBatchCreateParams, @@ -13,14 +13,17 @@ import { } from './file-batches'; import * as FilesAPI from './files'; import { + FileContentResponse, + FileContentResponsesPage, FileCreateParams, FileListParams, + FileUpdateParams, Files, VectorStoreFile, VectorStoreFileDeleted, VectorStoreFilesPage, } from './files'; -import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { CursorPage, type CursorPageParams, Page } from '../../pagination'; export class VectorStores extends APIResource { files: FilesAPI.Files = new FilesAPI.Files(this._client); @@ -93,10 +96,32 @@ export class VectorStores extends APIResource { headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } + + /** + * Search a vector store for relevant chunks based on a query and file attributes + * filter. 
+ */ + search( + vectorStoreId: string, + body: VectorStoreSearchParams, + options?: Core.RequestOptions, + ): Core.PagePromise { + return this._client.getAPIList(`/vector_stores/${vectorStoreId}/search`, VectorStoreSearchResponsesPage, { + body, + method: 'post', + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } } export class VectorStoresPage extends CursorPage {} +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class VectorStoreSearchResponsesPage extends Page {} + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of * `800` and `chunk_overlap_tokens` of `400`. @@ -155,6 +180,9 @@ export interface StaticFileChunkingStrategyObject { type: 'static'; } +/** + * Customize your own chunking strategy by setting chunk size and chunk overlap. + */ export interface StaticFileChunkingStrategyObjectParam { static: StaticFileChunkingStrategy; @@ -282,6 +310,51 @@ export interface VectorStoreDeleted { object: 'vector_store.deleted'; } +export interface VectorStoreSearchResponse { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes: Record | null; + + /** + * Content chunks from the file. + */ + content: Array; + + /** + * The ID of the vector store file. + */ + file_id: string; + + /** + * The name of the vector store file. + */ + filename: string; + + /** + * The similarity score for the result. + */ + score: number; +} + +export namespace VectorStoreSearchResponse { + export interface Content { + /** + * The text content returned from search. + */ + text: string; + + /** + * The type of content. 
+ */ + type: 'text'; + } +} + export interface VectorStoreCreateParams { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` @@ -391,9 +464,50 @@ export interface VectorStoreListParams extends CursorPageParams { order?: 'asc' | 'desc'; } +export interface VectorStoreSearchParams { + /** + * A query string for a search + */ + query: string | Array; + + /** + * A filter to apply based on file attributes. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. + */ + max_num_results?: number; + + /** + * Ranking options for search. + */ + ranking_options?: VectorStoreSearchParams.RankingOptions; + + /** + * Whether to rewrite the natural language query for vector search. + */ + rewrite_query?: boolean; +} + +export namespace VectorStoreSearchParams { + /** + * Ranking options for search. + */ + export interface RankingOptions { + ranker?: 'auto' | 'default-2024-11-15'; + + score_threshold?: number; + } +} + VectorStores.VectorStoresPage = VectorStoresPage; +VectorStores.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; VectorStores.Files = Files; VectorStores.VectorStoreFilesPage = VectorStoreFilesPage; +VectorStores.FileContentResponsesPage = FileContentResponsesPage; VectorStores.FileBatches = FileBatches; export declare namespace VectorStores { @@ -407,18 +521,24 @@ export declare namespace VectorStores { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, + VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, type VectorStoreCreateParams as VectorStoreCreateParams, type VectorStoreUpdateParams as VectorStoreUpdateParams, type VectorStoreListParams as VectorStoreListParams, + 
type VectorStoreSearchParams as VectorStoreSearchParams, }; export { Files as Files, type VectorStoreFile as VectorStoreFile, type VectorStoreFileDeleted as VectorStoreFileDeleted, + type FileContentResponse as FileContentResponse, VectorStoreFilesPage as VectorStoreFilesPage, + FileContentResponsesPage as FileContentResponsesPage, type FileCreateParams as FileCreateParams, + type FileUpdateParams as FileUpdateParams, type FileListParams as FileListParams, }; diff --git a/src/streaming.ts b/src/streaming.ts index 52266154c..25b960314 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -41,7 +41,7 @@ export class Stream implements AsyncIterable { continue; } - if (sse.event === null) { + if (sse.event === null || sse.event.startsWith('response.')) { let data; try { diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts index acdd631db..eddf252b1 100644 --- a/tests/api-resources/chat/completions/completions.test.ts +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -43,9 +43,9 @@ describe('resource completions', () => { presence_penalty: -2, reasoning_effort: 'low', response_format: { type: 'text' }, - seed: 0, + seed: -9007199254740991, service_tier: 'auto', - stop: 'string', + stop: '\n', store: true, stream: false, stream_options: { include_usage: true }, @@ -60,6 +60,13 @@ describe('resource completions', () => { top_logprobs: 0, top_p: 1, user: 'user-1234', + web_search_options: { + search_context_size: 'low', + user_location: { + approximate: { city: 'city', country: 'country', region: 'region', timezone: 'timezone' }, + type: 'approximate', + }, + }, }); }); diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts new file mode 100644 index 000000000..51b86f1b3 --- /dev/null +++ b/tests/api-resources/responses/input-items.test.ts @@ -0,0 +1,40 @@ +// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource inputItems', () => { + test('list', async () => { + const responsePromise = client.responses.inputItems.list('response_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list('response_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list( + 'response_id', + { after: 'after', before: 'before', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/files.test.ts b/tests/api-resources/responses/responses.test.ts similarity index 58% rename from tests/api-resources/beta/vector-stores/files.test.ts rename to tests/api-resources/responses/responses.test.ts index 7c14d4de3..e10722738 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -8,9 +8,9 @@ const client = new 
OpenAI({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); -describe('resource files', () => { +describe('resource responses', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const responsePromise = client.responses.create({ input: 'string', model: 'gpt-4o' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,14 +21,38 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.files.create('vs_abc123', { - file_id: 'file_id', - chunking_strategy: { type: 'auto' }, + const response = await client.responses.create({ + input: 'string', + model: 'gpt-4o', + include: ['file_search_call.results'], + instructions: 'instructions', + max_output_tokens: 0, + metadata: { foo: 'string' }, + parallel_tool_calls: true, + previous_response_id: 'previous_response_id', + reasoning: { effort: 'low', generate_summary: 'concise' }, + store: true, + stream: false, + temperature: 1, + text: { format: { type: 'text' } }, + tool_choice: 'none', + tools: [ + { + type: 'file_search', + vector_store_ids: ['string'], + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 0, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + }, + ], + top_p: 1, + truncation: 'auto', + user: 'user-1234', }); }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const responsePromise = client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -41,43 +65,25 @@ describe('resource files', () => { test('retrieve: request 
options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { + client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); - test('list', async () => { - const responsePromise = client.beta.vectorStores.files.list('vector_store_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('list: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.beta.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(OpenAI.NotFoundError); - }); - - test('list: request options and params are passed correctly', async () => { + test('retrieve: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.list( - 'vector_store_id', - { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, + client.responses.retrieve( + 'resp_677efb5139a88190b512bc3fef8e535d', + { include: ['file_search_call.results'] }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = 
client.beta.vectorStores.files.del('vector_store_id', 'file_id'); + const responsePromise = client.responses.del('resp_677efb5139a88190b512bc3fef8e535d'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -90,7 +96,7 @@ describe('resource files', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + client.responses.del('resp_677efb5139a88190b512bc3fef8e535d', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts similarity index 81% rename from tests/api-resources/beta/vector-stores/file-batches.test.ts rename to tests/api-resources/vector-stores/file-batches.test.ts index b714049b4..c0447a838 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/vector-stores/file-batches.test.ts @@ -10,9 +10,7 @@ const client = new OpenAI({ describe('resource fileBatches', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.create('vs_abc123', { - file_ids: ['string'], - }); + const responsePromise = client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,14 +21,15 @@ describe('resource fileBatches', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.fileBatches.create('vs_abc123', { + const response = 
await client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'], + attributes: { foo: 'string' }, chunking_strategy: { type: 'auto' }, }); }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); + const responsePromise = client.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,14 +42,14 @@ describe('resource fileBatches', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { + client.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); + const responsePromise = client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -63,14 +62,14 @@ describe('resource fileBatches', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('listFiles', async () => { - const responsePromise = 
client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); + const responsePromise = client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -83,7 +82,7 @@ describe('resource fileBatches', () => { test('listFiles: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); @@ -92,7 +91,7 @@ describe('resource fileBatches', () => { test('listFiles: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.listFiles( + client.vectorStores.fileBatches.listFiles( 'vector_store_id', 'batch_id', { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, diff --git a/tests/api-resources/vector-stores/files.test.ts b/tests/api-resources/vector-stores/files.test.ts new file mode 100644 index 000000000..86a8f9bb4 --- /dev/null +++ b/tests/api-resources/vector-stores/files.test.ts @@ -0,0 +1,132 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource files', () => { + test('create: only required params', async () => { + const responsePromise = client.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.vectorStores.files.create('vs_abc123', { + file_id: 'file_id', + attributes: { foo: 'string' }, + chunking_strategy: { type: 'auto' }, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.vectorStores.files.update('vs_abc123', 'file-abc123', { + attributes: { foo: 'string' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); 
+ const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.vectorStores.files.update('vs_abc123', 'file-abc123', { + attributes: { foo: 'string' }, + }); + }); + + test('list', async () => { + const responsePromise = client.vectorStores.files.list('vector_store_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list( + 'vector_store_id', + { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.vectorStores.files.del('vector_store_id', 'file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await 
responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('content', async () => { + const responsePromise = client.vectorStores.files.content('vs_abc123', 'file-abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('content: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.content('vs_abc123', 'file-abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/vector-stores/vector-stores.test.ts similarity index 71% rename from tests/api-resources/beta/vector-stores/vector-stores.test.ts rename to tests/api-resources/vector-stores/vector-stores.test.ts index 806098de8..465904a00 100644 --- a/tests/api-resources/beta/vector-stores/vector-stores.test.ts +++ b/tests/api-resources/vector-stores/vector-stores.test.ts @@ -10,7 +10,7 @@ const client = new OpenAI({ 
describe('resource vectorStores', () => { test('create', async () => { - const responsePromise = client.beta.vectorStores.create({}); + const responsePromise = client.vectorStores.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource vectorStores', () => { }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.retrieve('vector_store_id'); + const responsePromise = client.vectorStores.retrieve('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -34,12 +34,12 @@ describe('resource vectorStores', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), + client.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = client.beta.vectorStores.update('vector_store_id', {}); + const responsePromise = client.vectorStores.update('vector_store_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -50,7 +50,7 @@ describe('resource vectorStores', () => { }); test('list', async () => { - const responsePromise = client.beta.vectorStores.list(); + const responsePromise = client.vectorStores.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -62,7 +62,7 @@ describe('resource vectorStores', () => { test('list: request options 
instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.beta.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -70,7 +70,7 @@ describe('resource vectorStores', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.list( + client.vectorStores.list( { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), @@ -78,7 +78,7 @@ describe('resource vectorStores', () => { }); test('del', async () => { - const responsePromise = client.beta.vectorStores.del('vector_store_id'); + const responsePromise = client.vectorStores.del('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -91,7 +91,28 @@ describe('resource vectorStores', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), + client.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); + + test('search: only required params', async () => { + const responsePromise = client.vectorStores.search('vs_abc123', { query: 'string' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + 
expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('search: required and optional params', async () => { + const response = await client.vectorStores.search('vs_abc123', { + query: 'string', + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 1, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + rewrite_query: true, + }); + }); }); From b0930694021fb07c03782387cf3ba9d8df6fb975 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 16:32:03 +0000 Subject: [PATCH 435/533] release: 4.87.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a889d24b4..e8984a56c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.86.2" + ".": "4.87.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 38d54fdc1..2ec7edb2b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.87.0 (2025-03-11) + +Full Changelog: [v4.86.2...v4.87.0](https://github.com/openai/openai-node/compare/v4.86.2...v4.87.0) + +### Features + +* **api:** add /v1/responses and built-in tools ([119b584](https://github.com/openai/openai-node/commit/119b5843a18b8014167c8d2031d75c08dbf400a3)) + ## 4.86.2 (2025-03-05) Full Changelog: [v4.86.1...v4.86.2](https://github.com/openai/openai-node/compare/v4.86.1...v4.86.2) diff --git a/jsr.json b/jsr.json index 1c0948aaa..4ac1601e7 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.86.2", + "version": "4.87.0", "exports": { ".": "./index.ts", "./helpers/zod": 
"./helpers/zod.ts", diff --git a/package.json b/package.json index 78afb8946..7cf4a385d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.86.2", + "version": "4.87.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c43a3c320..2b1fd6541 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.86.2'; // x-release-please-version +export const VERSION = '4.87.0'; // x-release-please-version From 21f210782b1ee3b33231cfed0277ab8e3a764bcb Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Mar 2025 12:42:29 -0400 Subject: [PATCH 436/533] fix: correct imports --- src/lib/ResponsesParser.ts | 2 +- src/lib/responses/ResponseStream.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts index 780b779ff..8d762d5bb 100644 --- a/src/lib/ResponsesParser.ts +++ b/src/lib/ResponsesParser.ts @@ -1,5 +1,5 @@ import { OpenAIError } from '../error'; -import { type ChatCompletionTool } from '../resources'; +import type { ChatCompletionTool } from '../resources/chat/completions'; import { type FunctionTool, type ParsedContent, diff --git a/src/lib/responses/ResponseStream.ts b/src/lib/responses/ResponseStream.ts index 0d6cd47dd..d2ee80a75 100644 --- a/src/lib/responses/ResponseStream.ts +++ b/src/lib/responses/ResponseStream.ts @@ -4,7 +4,7 @@ import { type ResponseCreateParamsBase, type ResponseCreateParamsStreaming, type ResponseStreamEvent, -} from 'openai/resources/responses/responses'; +} from '../../resources/responses/responses'; import * as Core from '../../core'; import { APIUserAbortError, OpenAIError } from '../../error'; import OpenAI from '../../index'; From 0bc08d15143f63536b2331a174bec3d0411a7356 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 16:45:57 +0000 Subject: [PATCH 437/533] release: 4.87.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e8984a56c..dab137bc4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.0" + ".": "4.87.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ec7edb2b..46477c290 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.87.1 (2025-03-11) + +Full Changelog: [v4.87.0...v4.87.1](https://github.com/openai/openai-node/compare/v4.87.0...v4.87.1) + +### Bug Fixes + +* correct imports ([5cdf17c](https://github.com/openai/openai-node/commit/5cdf17cec33da7cf540b8bdbcfa30c0c52842dd1)) + ## 4.87.0 (2025-03-11) Full Changelog: [v4.86.2...v4.87.0](https://github.com/openai/openai-node/compare/v4.86.2...v4.87.0) diff --git a/jsr.json b/jsr.json index 4ac1601e7..beb9f5c47 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.87.0", + "version": "4.87.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 7cf4a385d..386015e42 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.0", + "version": "4.87.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 2b1fd6541..35d1d1c0f 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.0'; // x-release-please-version +export const VERSION = '4.87.1'; // x-release-please-version From 8ae07cc036895529a028134451fe2ab5c1661871 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: 
Tue, 11 Mar 2025 13:55:38 -0400 Subject: [PATCH 438/533] fix(responses): correctly add output_text --- src/resources/responses/responses.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 2ad146873..060147a2b 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -79,7 +79,7 @@ export class Responses extends APIResource { | APIPromise | APIPromise> )._thenUnwrap((rsp) => { - if ('type' in rsp && rsp.type === 'response') { + if ('object' in rsp && rsp.object === 'response') { addOutputText(rsp as Response); } From 0b33959ed9d911c73b7ea4935761c702266bec6c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 17:56:33 +0000 Subject: [PATCH 439/533] release: 4.87.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index dab137bc4..464f20492 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.1" + ".": "4.87.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 46477c290..4a4cb5036 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.87.2 (2025-03-11) + +Full Changelog: [v4.87.1...v4.87.2](https://github.com/openai/openai-node/compare/v4.87.1...v4.87.2) + +### Bug Fixes + +* **responses:** correctly add output_text ([4ceb5cc](https://github.com/openai/openai-node/commit/4ceb5cc516b8c75d46f0042534d7658796a8cd71)) + ## 4.87.1 (2025-03-11) Full Changelog: [v4.87.0...v4.87.1](https://github.com/openai/openai-node/compare/v4.87.0...v4.87.1) diff --git a/jsr.json b/jsr.json index beb9f5c47..b6857cfb0 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - 
"version": "4.87.1", + "version": "4.87.2", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 386015e42..2fbc060df 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.1", + "version": "4.87.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 35d1d1c0f..854c6827d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.1'; // x-release-please-version +export const VERSION = '4.87.2'; // x-release-please-version From 9cb95763cab5678c5098b37ad0fe1ec83d2c1cb7 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Mar 2025 17:36:01 -0400 Subject: [PATCH 440/533] fix(responses): correct reasoning output type --- api.md | 1 + src/resources/responses/responses.ts | 131 +++++++++------------------ 2 files changed, 45 insertions(+), 87 deletions(-) diff --git a/api.md b/api.md index b21ac2d5f..2fac07f38 100644 --- a/api.md +++ b/api.md @@ -583,6 +583,7 @@ Types: - ResponseOutputMessage - ResponseOutputRefusal - ResponseOutputText +- ResponseReasoningItem - ResponseRefusalDeltaEvent - ResponseRefusalDoneEvent - ResponseStatus diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 060147a2b..72adf0696 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -36,7 +36,7 @@ export type ParsedResponseOutputItem = | ResponseFileSearchToolCall | ResponseFunctionWebSearch | ResponseComputerToolCall - | ResponseOutputItem.Reasoning; + | ResponseReasoningItem; export interface ParsedResponse extends Response { output: Array>; @@ -1562,7 +1562,7 @@ export type ResponseInputItem = | ResponseFunctionWebSearch | ResponseFunctionToolCall | ResponseInputItem.FunctionCallOutput - | ResponseInputItem.Reasoning + | ResponseReasoningItem | 
ResponseInputItem.ItemReference; export namespace ResponseInputItem { @@ -1707,47 +1707,6 @@ export namespace ResponseInputItem { status?: 'in_progress' | 'completed' | 'incomplete'; } - /** - * A description of the chain of thought used by a reasoning model while generating - * a response. - */ - export interface Reasoning { - /** - * The unique identifier of the reasoning content. - */ - id: string; - - /** - * Reasoning text contents. - */ - content: Array; - - /** - * The type of the object. Always `reasoning`. - */ - type: 'reasoning'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace Reasoning { - export interface Content { - /** - * A short summary of the reasoning used by the model when generating the response. - */ - text: string; - - /** - * The type of the object. Always `text`. - */ - type: 'reasoning_summary'; - } - } - /** * An internal identifier for an item to reference. */ @@ -1814,50 +1773,7 @@ export type ResponseOutputItem = | ResponseFunctionToolCall | ResponseFunctionWebSearch | ResponseComputerToolCall - | ResponseOutputItem.Reasoning; - -export namespace ResponseOutputItem { - /** - * A description of the chain of thought used by a reasoning model while generating - * a response. - */ - export interface Reasoning { - /** - * The unique identifier of the reasoning content. - */ - id: string; - - /** - * Reasoning text contents. - */ - content: Array; - - /** - * The type of the object. Always `reasoning`. - */ - type: 'reasoning'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace Reasoning { - export interface Content { - /** - * A short summary of the reasoning used by the model when generating the response. 
- */ - text: string; - - /** - * The type of the object. Always `text`. - */ - type: 'reasoning_summary'; - } - } -} + | ResponseReasoningItem; /** * Emitted when a new output item is added. @@ -2039,6 +1955,47 @@ export namespace ResponseOutputText { } } +/** + * A description of the chain of thought used by a reasoning model while generating + * a response. + */ +export interface ResponseReasoningItem { + /** + * The unique identifier of the reasoning content. + */ + id: string; + + /** + * Reasoning text contents. + */ + summary: Array; + + /** + * The type of the object. Always `reasoning`. + */ + type: 'reasoning'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +export namespace ResponseReasoningItem { + export interface Summary { + /** + * A short summary of the reasoning used by the model when generating the response. + */ + text: string; + + /** + * The type of the object. Always `summary_text`. + */ + type: 'summary_text'; + } +} + /** * Emitted when there is a partial refusal text. 
*/ From be2414ce22517e3259192c751de55744649deec1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 21:40:08 +0000 Subject: [PATCH 441/533] release: 4.87.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 464f20492..0c7a85094 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.2" + ".": "4.87.3" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a4cb5036..46a595495 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.87.3 (2025-03-11) + +Full Changelog: [v4.87.2...v4.87.3](https://github.com/openai/openai-node/compare/v4.87.2...v4.87.3) + +### Bug Fixes + +* **responses:** correct reasoning output type ([2abef57](https://github.com/openai/openai-node/commit/2abef57d7645a96a4b9a6b91483861cd568d2d4d)) + ## 4.87.2 (2025-03-11) Full Changelog: [v4.87.1...v4.87.2](https://github.com/openai/openai-node/compare/v4.87.1...v4.87.2) diff --git a/jsr.json b/jsr.json index b6857cfb0..1051fade0 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.87.2", + "version": "4.87.3", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 2fbc060df..9967a814d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.2", + "version": "4.87.3", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 854c6827d..e84192528 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.2'; // x-release-please-version +export const VERSION = 
'4.87.3'; // x-release-please-version From e905c95a27213ee65210b061ead4c982de01648b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 19:24:51 +0000 Subject: [PATCH 442/533] chore(internal): remove CI condition (#1381) --- .github/workflows/ci.yml | 5 ++- .github/workflows/create-releases.yml | 50 --------------------------- .github/workflows/publish-jsr.yml | 8 +++-- .github/workflows/publish-npm.yml | 8 +++-- .github/workflows/release-doctor.yml | 1 - .stats.yml | 2 +- bin/check-release-environment | 4 --- examples/yarn.lock | 0 8 files changed, 15 insertions(+), 63 deletions(-) delete mode 100644 .github/workflows/create-releases.yml delete mode 100644 examples/yarn.lock diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fe24c0dcb..3efb3f17a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ jobs: lint: name: lint runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' + steps: - uses: actions/checkout@v4 @@ -31,7 +31,7 @@ jobs: build: name: build runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' + steps: - uses: actions/checkout@v4 @@ -49,7 +49,6 @@ jobs: test: name: test runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml deleted file mode 100644 index 19b7dd831..000000000 --- a/.github/workflows/create-releases.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Create releases -on: - schedule: - - cron: '0 5 * * *' # every day at 5am UTC - push: - branches: - - master - -jobs: - release: - name: release - if: github.ref == 'refs/heads/master' && github.repository == 'openai/openai-node' - runs-on: ubuntu-latest - environment: publish - permissions: - contents: read - id-token: write - - steps: - - uses: actions/checkout@v4 - - - uses: 
stainless-api/trigger-release-please@v1 - id: release - with: - repo: ${{ github.event.repository.full_name }} - stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - - name: Set up Node - if: ${{ steps.release.outputs.releases_created }} - uses: actions/setup-node@v3 - with: - node-version: '18' - - - name: Install dependencies - if: ${{ steps.release.outputs.releases_created }} - run: | - yarn install - - - name: Publish to NPM - if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-npm - env: - NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} - - - name: Publish to JSR - if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-jsr - diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml index 1e46d6bfb..dc5fe0a2a 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to JSR in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to JSR in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml name: Publish JSR on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 5a3711b53..d3b184555 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to NPM in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml +# This workflow is triggered when a GitHub release is created. 
+# It can also be run manually to re-publish to NPM in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml name: Publish NPM on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 37bc09e80..754a44931 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -19,6 +19,5 @@ jobs: run: | bash ./bin/check-release-environment env: - STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} diff --git a/.stats.yml b/.stats.yml index 455874212..53c73037d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml diff --git a/bin/check-release-environment b/bin/check-release-environment index dbfd546bf..e51564b7d 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,10 +2,6 @@ errors=() -if [ -z "${STAINLESS_API_KEY}" ]; then - errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") -fi - if [ -z "${NPM_TOKEN}" ]; then errors+=("The OPENAI_NPM_TOKEN secret has not been set. 
Please set it in either this repository's secrets or your organization secrets") fi diff --git a/examples/yarn.lock b/examples/yarn.lock deleted file mode 100644 index e69de29bb..000000000 From 2cbf49a0b9a8cfbee29cec558c5ccdcebd72396f Mon Sep 17 00:00:00 2001 From: meorphis Date: Fri, 14 Mar 2025 16:53:35 -0400 Subject: [PATCH 443/533] chore(internal): update release workflows --- .github/workflows/publish-jsr.yml | 8 ++------ .github/workflows/publish-npm.yml | 8 ++------ .github/workflows/release-doctor.yml | 1 + 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml index dc5fe0a2a..1e46d6bfb 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -1,13 +1,9 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to JSR in case it failed for some reason. -# You can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml +# workflow for re-running publishing to JSR in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml name: Publish JSR on: workflow_dispatch: - release: - types: [published] - jobs: publish: name: publish diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index d3b184555..5a3711b53 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -1,13 +1,9 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to NPM in case it failed for some reason. 
-# You can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml +# workflow for re-running publishing to NPM in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml name: Publish NPM on: workflow_dispatch: - release: - types: [published] - jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 754a44931..37bc09e80 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -19,5 +19,6 @@ jobs: run: | bash ./bin/check-release-environment env: + STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} From f4647cc7546d06145bf34113be22aabbd1b7e7ee Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 14:56:15 +0000 Subject: [PATCH 444/533] chore: add missing type alias exports (#1390) --- src/index.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/index.ts b/src/index.ts index c3abed2db..34cc3e84d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -118,6 +118,7 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStoreMessage, ChatCompletionStreamOptions, @@ -129,6 +130,7 @@ import { ChatCompletionUpdateParams, ChatCompletionUserMessageParam, ChatCompletionsPage, + CreateChatCompletionRequestMessage, } from './resources/chat/completions/completions'; export interface ClientOptions { @@ -404,6 +406,8 @@ export declare namespace OpenAI { type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type 
CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, From 9c45ef37249e7db3ba8aa2e81886ffe306b95da4 Mon Sep 17 00:00:00 2001 From: meorphis <108296353+meorphis@users.noreply.github.com> Date: Tue, 18 Mar 2025 11:33:27 -0400 Subject: [PATCH 445/533] chore(internal): run CI on update-specs branch --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3efb3f17a..627f5954f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,7 @@ on: push: branches: - master + - update-specs pull_request: branches: - master @@ -87,7 +88,6 @@ jobs: ecosystem_tests: name: ecosystem tests (v${{ matrix.node-version }}) runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' timeout-minutes: 20 strategy: fail-fast: false From e983d0c61d33b106f149d87eed90378bd0bbc349 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:54:03 +0000 Subject: [PATCH 446/533] fix(api): correct some Responses types (#1391) --- .stats.yml | 2 +- src/resources/batches.ts | 8 +++--- src/resources/chat/completions/completions.ts | 18 ++++++++----- src/resources/responses/responses.ts | 26 +++++++++++++++---- src/resources/shared.ts | 4 +-- tests/api-resources/batches.test.ts | 4 +-- 6 files changed, 42 insertions(+), 20 deletions(-) diff --git a/.stats.yml b/.stats.yml index 53c73037d..1e04d7c26 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml diff --git a/src/resources/batches.ts b/src/resources/batches.ts index aadda83a6..2cf2ac566 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -220,11 +220,11 @@ export interface BatchCreateParams { /** * The endpoint to be used for all requests in the batch. Currently - * `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - * Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - * embedding inputs across all requests in the batch. + * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + * are supported. Note that `/v1/embeddings` batches are also restricted to a + * maximum of 50,000 embedding inputs across all requests in the batch. */ - endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; + endpoint: '/v1/responses' | '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; /** * The ID of an uploaded file that contains requests for the new batch. diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 7b1c353e2..f54c01597 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -377,10 +377,13 @@ export interface ChatCompletionChunk { /** * An optional field that will only be present when you set * `stream_options: {"include_usage": true}` in your request. When present, it - * contains a null value except for the last chunk which contains the token usage - * statistics for the entire request. + * contains a null value **except for the last chunk** which contains the token + * usage statistics for the entire request. 
+ * + * **NOTE:** If the stream is interrupted or cancelled, you may not receive the + * final usage chunk which contains the total token usage for the request. */ - usage?: CompletionsAPI.CompletionUsage | null; + usage?: CompletionsAPI.CompletionUsage; } export namespace ChatCompletionChunk { @@ -551,7 +554,7 @@ export namespace ChatCompletionContentPart { /** * The name of the file, used when passing the file to the model as a string. */ - file_name?: string; + filename?: string; } } } @@ -930,8 +933,11 @@ export interface ChatCompletionStreamOptions { /** * If set, an additional chunk will be streamed before the `data: [DONE]` message. * The `usage` field on this chunk shows the token usage statistics for the entire - * request, and the `choices` field will always be an empty array. All other chunks - * will also include a `usage` field, but with a null value. + * request, and the `choices` field will always be an empty array. + * + * All other chunks will also include a `usage` field, but with a null value. + * **NOTE:** If the stream is interrupted, you may not receive the final usage + * chunk which contains the total token usage for the request. */ include_usage?: boolean; } diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 72adf0696..20d67b8ac 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -1362,11 +1362,6 @@ export interface ResponseFunctionCallArgumentsDoneEvent { * for more information. */ export interface ResponseFunctionToolCall { - /** - * The unique ID of the function tool call. - */ - id: string; - /** * A JSON string of the arguments to pass to the function. */ @@ -1387,6 +1382,11 @@ export interface ResponseFunctionToolCall { */ type: 'function_call'; + /** + * The unique ID of the function tool call. + */ + id?: string; + /** * The status of the item. One of `in_progress`, `completed`, or `incomplete`. * Populated when items are returned via API. 
@@ -2305,6 +2305,11 @@ export interface ResponseUsage { */ input_tokens: number; + /** + * A detailed breakdown of the input tokens. + */ + input_tokens_details: ResponseUsage.InputTokensDetails; + /** * The number of output tokens. */ @@ -2322,6 +2327,17 @@ export interface ResponseUsage { } export namespace ResponseUsage { + /** + * A detailed breakdown of the input tokens. + */ + export interface InputTokensDetails { + /** + * The number of tokens that were retrieved from the cache. + * [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + */ + cached_tokens: number; + } + /** * A detailed breakdown of the output tokens. */ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 86b2d2dee..5fbdbba6a 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -171,10 +171,10 @@ export interface Reasoning { * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - effort: ReasoningEffort | null; + effort?: ReasoningEffort | null; /** - * **o-series models only** + * **computer_use_preview only** * * A summary of the reasoning performed by the model. This can be useful for * debugging and understanding the model's reasoning process. 
One of `concise` or diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts index 96e200fb9..7c7397d06 100644 --- a/tests/api-resources/batches.test.ts +++ b/tests/api-resources/batches.test.ts @@ -12,7 +12,7 @@ describe('resource batches', () => { test('create: only required params', async () => { const responsePromise = client.batches.create({ completion_window: '24h', - endpoint: '/v1/chat/completions', + endpoint: '/v1/responses', input_file_id: 'input_file_id', }); const rawResponse = await responsePromise.asResponse(); @@ -27,7 +27,7 @@ describe('resource batches', () => { test('create: required and optional params', async () => { const response = await client.batches.create({ completion_window: '24h', - endpoint: '/v1/chat/completions', + endpoint: '/v1/responses', input_file_id: 'input_file_id', metadata: { foo: 'string' }, }); From ca6266eea5229056a3bc2b5e4225b9ea9eaa459e Mon Sep 17 00:00:00 2001 From: meorphis Date: Tue, 18 Mar 2025 14:08:33 -0400 Subject: [PATCH 447/533] chore(internal): add back release workflow --- .github/workflows/create-releases.yml | 50 +++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 .github/workflows/create-releases.yml diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml new file mode 100644 index 000000000..19b7dd831 --- /dev/null +++ b/.github/workflows/create-releases.yml @@ -0,0 +1,50 @@ +name: Create releases +on: + schedule: + - cron: '0 5 * * *' # every day at 5am UTC + push: + branches: + - master + +jobs: + release: + name: release + if: github.ref == 'refs/heads/master' && github.repository == 'openai/openai-node' + runs-on: ubuntu-latest + environment: publish + permissions: + contents: read + id-token: write + + steps: + - uses: actions/checkout@v4 + + - uses: stainless-api/trigger-release-please@v1 + id: release + with: + repo: ${{ github.event.repository.full_name }} + stainless-api-key: ${{ secrets.STAINLESS_API_KEY 
}} + + - name: Set up Node + if: ${{ steps.release.outputs.releases_created }} + uses: actions/setup-node@v3 + with: + node-version: '18' + + - name: Install dependencies + if: ${{ steps.release.outputs.releases_created }} + run: | + yarn install + + - name: Publish to NPM + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-npm + env: + NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} + + - name: Publish to JSR + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-jsr + From d2be74a28dec48cd7d88db88af95e8bc608cdede Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 18 Mar 2025 18:52:33 +0000 Subject: [PATCH 448/533] fix(types): ignore missing `id` in responses pagination --- src/resources/responses/input-items.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index 9704be89a..d622b8e58 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -65,6 +65,7 @@ export class InputItems extends APIResource { } export class ResponseItemListDataPage extends CursorPage< + // @ts-ignore some items don't necessarily have the `id` property | ResponseItemList.Message | ResponsesAPI.ResponseOutputMessage | ResponsesAPI.ResponseFileSearchToolCall From 454832606ebe9d5cf8ffd436eac09375f682c495 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 20:48:11 +0000 Subject: [PATCH 449/533] fix(types): improve responses type names (#1392) --- .stats.yml | 2 +- api.md | 8 +- src/resources/responses/index.ts | 7 +- src/resources/responses/input-items.ts | 210 +------------------- src/resources/responses/input-items.ts.orig | 114 +++++++++++ src/resources/responses/responses.ts | 191 +++++++++++++++--- 6 files changed, 298 insertions(+), 234 deletions(-) create mode 100644 
src/resources/responses/input-items.ts.orig diff --git a/.stats.yml b/.stats.yml index 1e04d7c26..b03256223 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml diff --git a/api.md b/api.md index 2fac07f38..fd8482bf2 100644 --- a/api.md +++ b/api.md @@ -548,6 +548,8 @@ Types: - ResponseCodeInterpreterToolCall - ResponseCompletedEvent - ResponseComputerToolCall +- ResponseComputerToolCallOutputItem +- ResponseComputerToolCallOutputScreenshot - ResponseContent - ResponseContentPartAddedEvent - ResponseContentPartDoneEvent @@ -564,6 +566,8 @@ Types: - ResponseFunctionCallArgumentsDeltaEvent - ResponseFunctionCallArgumentsDoneEvent - ResponseFunctionToolCall +- ResponseFunctionToolCallItem +- ResponseFunctionToolCallOutputItem - ResponseFunctionWebSearch - ResponseInProgressEvent - ResponseIncludable @@ -575,7 +579,9 @@ Types: - ResponseInputImage - ResponseInputItem - ResponseInputMessageContentList +- ResponseInputMessageItem - ResponseInputText +- ResponseItem - ResponseOutputAudio - ResponseOutputItem - ResponseOutputItemAddedEvent @@ -616,4 +622,4 @@ Types: Methods: -- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemListDataPage +- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemsPage diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts index 84f761a93..ad3f9a386 100644 --- a/src/resources/responses/index.ts +++ b/src/resources/responses/index.ts @@ -1,9 +1,4 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { - ResponseItemListDataPage, - InputItems, - type ResponseItemList, - type InputItemListParams, -} from './input-items'; +export { InputItems, type ResponseItemList, type InputItemListParams } from './input-items'; export { Responses } from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index d622b8e58..f2292e5c6 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -4,7 +4,8 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as ResponsesAPI from './responses'; -import { CursorPage, type CursorPageParams } from '../../pagination'; +import { ResponseItemsPage } from './responses'; +import { type CursorPageParams } from '../../pagination'; export class InputItems extends APIResource { /** @@ -14,68 +15,26 @@ export class InputItems extends APIResource { responseId: string, query?: InputItemListParams, options?: Core.RequestOptions, - ): Core.PagePromise< - ResponseItemListDataPage, - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - >; + ): Core.PagePromise; list( responseId: string, options?: Core.RequestOptions, - ): Core.PagePromise< - ResponseItemListDataPage, - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - >; + ): Core.PagePromise; list( responseId: string, query: InputItemListParams | Core.RequestOptions = {}, options?: 
Core.RequestOptions, - ): Core.PagePromise< - ResponseItemListDataPage, - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - > { + ): Core.PagePromise { if (isRequestOptions(query)) { return this.list(responseId, {}, query); } - return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemListDataPage, { + return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemsPage, { query, ...options, }); } } -export class ResponseItemListDataPage extends CursorPage< - // @ts-ignore some items don't necessarily have the `id` property - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput -> {} - /** * A list of Response items. */ @@ -83,16 +42,7 @@ export interface ResponseItemList { /** * A list of items used to generate this response. */ - data: Array< - | ResponseItemList.Message - | ResponsesAPI.ResponseOutputMessage - | ResponsesAPI.ResponseFileSearchToolCall - | ResponsesAPI.ResponseComputerToolCall - | ResponseItemList.ComputerCallOutput - | ResponsesAPI.ResponseFunctionWebSearch - | ResponsesAPI.ResponseFunctionToolCall - | ResponseItemList.FunctionCallOutput - >; + data: Array; /** * The ID of the first item in the list. @@ -115,142 +65,6 @@ export interface ResponseItemList { object: 'list'; } -export namespace ResponseItemList { - export interface Message { - /** - * The unique ID of the message input. 
- */ - id: string; - - /** - * A list of one or many input items to the model, containing different content - * types. - */ - content: ResponsesAPI.ResponseInputMessageContentList; - - /** - * The role of the message input. One of `user`, `system`, or `developer`. - */ - role: 'user' | 'system' | 'developer'; - - /** - * The status of item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - - /** - * The type of the message input. Always set to `message`. - */ - type?: 'message'; - } - - export interface ComputerCallOutput { - /** - * The unique ID of the computer call tool output. - */ - id: string; - - /** - * The ID of the computer tool call that produced the output. - */ - call_id: string; - - /** - * A computer screenshot image used with the computer use tool. - */ - output: ComputerCallOutput.Output; - - /** - * The type of the computer tool call output. Always `computer_call_output`. - */ - type: 'computer_call_output'; - - /** - * The safety checks reported by the API that have been acknowledged by the - * developer. - */ - acknowledged_safety_checks?: Array; - - /** - * The status of the message input. One of `in_progress`, `completed`, or - * `incomplete`. Populated when input items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } - - export namespace ComputerCallOutput { - /** - * A computer screenshot image used with the computer use tool. - */ - export interface Output { - /** - * Specifies the event type. For a computer screenshot, this property is always set - * to `computer_screenshot`. - */ - type: 'computer_screenshot'; - - /** - * The identifier of an uploaded file that contains the screenshot. - */ - file_id?: string; - - /** - * The URL of the screenshot image. - */ - image_url?: string; - } - - /** - * A pending safety check for the computer call. 
- */ - export interface AcknowledgedSafetyCheck { - /** - * The ID of the pending safety check. - */ - id: string; - - /** - * The type of the pending safety check. - */ - code: string; - - /** - * Details about the pending safety check. - */ - message: string; - } - } - - export interface FunctionCallOutput { - /** - * The unique ID of the function call tool output. - */ - id: string; - - /** - * The unique ID of the function tool call generated by the model. - */ - call_id: string; - - /** - * A JSON string of the output of the function tool call. - */ - output: string; - - /** - * The type of the function tool call output. Always `function_call_output`. - */ - type: 'function_call_output'; - - /** - * The status of the item. One of `in_progress`, `completed`, or `incomplete`. - * Populated when items are returned via API. - */ - status?: 'in_progress' | 'completed' | 'incomplete'; - } -} - export interface InputItemListParams extends CursorPageParams { /** * An item ID to list items before, used in pagination. @@ -266,12 +80,8 @@ export interface InputItemListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -InputItems.ResponseItemListDataPage = ResponseItemListDataPage; - export declare namespace InputItems { - export { - type ResponseItemList as ResponseItemList, - ResponseItemListDataPage as ResponseItemListDataPage, - type InputItemListParams as InputItemListParams, - }; + export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams }; } + +export { ResponseItemsPage }; diff --git a/src/resources/responses/input-items.ts.orig b/src/resources/responses/input-items.ts.orig new file mode 100644 index 000000000..470740b61 --- /dev/null +++ b/src/resources/responses/input-items.ts.orig @@ -0,0 +1,114 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as ResponsesAPI from './responses'; +import { ResponseItemsPage } from './responses'; +import { type CursorPageParams } from '../../pagination'; + +export class InputItems extends APIResource { + /** + * Returns a list of input items for a given response. + */ + list( + responseId: string, + query?: InputItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + responseId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + responseId: string, + query: InputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(responseId, {}, query); + } + return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemsPage, { + query, + ...options, + }); + } +} + +<<<<<<< HEAD +export class ResponseItemListDataPage extends CursorPage< + // @ts-ignore some items don't necessarily have the `id` property + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput +> {} + +||||||| parent of e5ea4a71 (fix(types): improve responses type names (#1392)) +export class ResponseItemListDataPage extends CursorPage< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput +> {} + +======= +>>>>>>> e5ea4a71 (fix(types): improve responses type names (#1392)) +/** + * A list of 
Response items. + */ +export interface ResponseItemList { + /** + * A list of items used to generate this response. + */ + data: Array; + + /** + * The ID of the first item in the list. + */ + first_id: string; + + /** + * Whether there are more items available. + */ + has_more: boolean; + + /** + * The ID of the last item in the list. + */ + last_id: string; + + /** + * The type of object returned, must be `list`. + */ + object: 'list'; +} + +export interface InputItemListParams extends CursorPageParams { + /** + * An item ID to list items before, used in pagination. + */ + before?: string; + + /** + * The order to return the input items in. Default is `asc`. + * + * - `asc`: Return the input items in ascending order. + * - `desc`: Return the input items in descending order. + */ + order?: 'asc' | 'desc'; +} + +export declare namespace InputItems { + export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams }; +} + +export { ResponseItemsPage }; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 20d67b8ac..b2cd6b56c 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -9,12 +9,13 @@ import { import * as Core from '../../core'; import { APIPromise, isRequestOptions } from '../../core'; import { APIResource } from '../../resource'; -import { Stream } from '../../streaming'; import * as Shared from '../shared'; import * as InputItemsAPI from './input-items'; -import { InputItemListParams, InputItems, ResponseItemList, ResponseItemListDataPage } from './input-items'; +import { InputItemListParams, InputItems, ResponseItemList } from './input-items'; import * as ResponsesAPI from './responses'; import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream'; +import { CursorPage } from '../../pagination'; +import { Stream } from '../../streaming'; export interface ParsedResponseOutputText extends ResponseOutputText { 
parsed: ParsedT | null; @@ -137,6 +138,8 @@ export class Responses extends APIResource { } } +export class ResponseItemsPage extends CursorPage {} + /** * A tool that controls a virtual computer. Learn more about the * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). @@ -966,6 +969,83 @@ export namespace ResponseComputerToolCall { } } +export interface ResponseComputerToolCallOutputItem { + /** + * The unique ID of the computer call tool output. + */ + id: string; + + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ResponseComputerToolCallOutputScreenshot; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +export namespace ResponseComputerToolCallOutputItem { + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +/** + * A computer screenshot image used with the computer use tool. + */ +export interface ResponseComputerToolCallOutputScreenshot { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. 
+ */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; +} + /** * Multi-modal input and output contents. */ @@ -1394,6 +1474,46 @@ export interface ResponseFunctionToolCall { status?: 'in_progress' | 'completed' | 'incomplete'; } +/** + * A tool call to run a function. See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { + /** + * The unique ID of the function call tool output. + */ + id: string; +} + +export interface ResponseFunctionToolCallOutputItem { + /** + * The unique ID of the function call tool output. + */ + id: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + /** * The results of a web search tool call. See the * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for @@ -1607,7 +1727,7 @@ export namespace ResponseInputItem { /** * A computer screenshot image used with the computer use tool. */ - output: ComputerCallOutput.Output; + output: ResponsesAPI.ResponseComputerToolCallOutputScreenshot; /** * The type of the computer tool call output. Always `computer_call_output`. @@ -1633,27 +1753,6 @@ export namespace ResponseInputItem { } export namespace ComputerCallOutput { - /** - * A computer screenshot image used with the computer use tool. - */ - export interface Output { - /** - * Specifies the event type. 
For a computer screenshot, this property is always set - * to `computer_screenshot`. - */ - type: 'computer_screenshot'; - - /** - * The identifier of an uploaded file that contains the screenshot. - */ - file_id?: string; - - /** - * The URL of the screenshot image. - */ - image_url?: string; - } - /** * A pending safety check for the computer call. */ @@ -1729,6 +1828,35 @@ export namespace ResponseInputItem { */ export type ResponseInputMessageContentList = Array; +export interface ResponseInputMessageItem { + /** + * The unique ID of the message input. + */ + id: string; + + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; +} + /** * A text input to the model. */ @@ -1744,6 +1872,19 @@ export interface ResponseInputText { type: 'input_text'; } +/** + * Content item used to generate a response. + */ +export type ResponseItem = + | ResponseInputMessageItem + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseComputerToolCall + | ResponseComputerToolCallOutputItem + | ResponseFunctionWebSearch + | ResponseFunctionToolCallItem + | ResponseFunctionToolCallOutputItem; + /** * An audio output from the model. 
*/ @@ -2722,13 +2863,11 @@ export interface ResponseRetrieveParams { } Responses.InputItems = InputItems; -Responses.ResponseItemListDataPage = ResponseItemListDataPage; export declare namespace Responses { export { InputItems as InputItems, type ResponseItemList as ResponseItemList, - ResponseItemListDataPage as ResponseItemListDataPage, type InputItemListParams as InputItemListParams, }; } From d9277683745a854e52ac165a67840a09049e5077 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 20:48:40 +0000 Subject: [PATCH 450/533] release: 4.87.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0c7a85094..a3649b199 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.3" + ".": "4.87.4" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 46a595495..d820d8fcd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.87.4 (2025-03-18) + +Full Changelog: [v4.87.3...v4.87.4](https://github.com/openai/openai-node/compare/v4.87.3...v4.87.4) + +### Bug Fixes + +* **api:** correct some Responses types ([#1391](https://github.com/openai/openai-node/issues/1391)) ([af45876](https://github.com/openai/openai-node/commit/af458766ac721fb6cf18e7d78c458506c8bfc4e1)) +* **types:** ignore missing `id` in responses pagination ([1b9d20e](https://github.com/openai/openai-node/commit/1b9d20e71f5afbd4999f1999fe4810175476c5d2)) +* **types:** improve responses type names ([#1392](https://github.com/openai/openai-node/issues/1392)) ([164f476](https://github.com/openai/openai-node/commit/164f47606b41fd3e2850f8209eb1c6e2996a81ff)) + + +### Chores + +* add missing type alias exports 
([#1390](https://github.com/openai/openai-node/issues/1390)) ([16c5e22](https://github.com/openai/openai-node/commit/16c5e2261c8c1a0ba96c2d5f475e8b1bc67387d7)) +* **internal:** add back release workflow ([dddf29b](https://github.com/openai/openai-node/commit/dddf29bd914a02d4586b239ec06217389a4409f9)) +* **internal:** remove CI condition ([#1381](https://github.com/openai/openai-node/issues/1381)) ([ef17981](https://github.com/openai/openai-node/commit/ef17981a0bd6b3e971986ece829c5d260d7392d4)) +* **internal:** run CI on update-specs branch ([9fc2130](https://github.com/openai/openai-node/commit/9fc2130b74a5919a3bbd41926903bdb310de4446)) +* **internal:** update release workflows ([90b77d0](https://github.com/openai/openai-node/commit/90b77d09c04d21487aa38fe775c79ae632136813)) + ## 4.87.3 (2025-03-11) Full Changelog: [v4.87.2...v4.87.3](https://github.com/openai/openai-node/compare/v4.87.2...v4.87.3) diff --git a/jsr.json b/jsr.json index 1051fade0..3e7c40d5f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.87.3", + "version": "4.87.4", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 9967a814d..baddade77 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.3", + "version": "4.87.4", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e84192528..172c899ea 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.3'; // x-release-please-version +export const VERSION = '4.87.4'; // x-release-please-version From 2e495267329b6853edff76c415e4c5ddc5e143e8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 20:50:59 +0000 Subject: [PATCH 451/533] chore(internal): version bump (#1393) --- 
jsr.json.orig | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 jsr.json.orig diff --git a/jsr.json.orig b/jsr.json.orig new file mode 100644 index 000000000..c7b99a6f6 --- /dev/null +++ b/jsr.json.orig @@ -0,0 +1,25 @@ +{ + "name": "@openai/openai", +<<<<<<< HEAD + "version": "4.87.4", + "exports": { + ".": "./index.ts", + "./helpers/zod": "./helpers/zod.ts", + "./beta/realtime/websocket": "./beta/realtime/websocket.ts" + }, + "imports": { + "zod": "npm:zod@3" + }, +||||||| parent of 0603bcac (chore(internal): version bump (#1393)) + "version": "4.87.3", + "exports": "./index.ts", +======= + "version": "4.87.4", + "exports": "./index.ts", +>>>>>>> 0603bcac (chore(internal): version bump (#1393)) + "publish": { + "exclude": [ + "!." + ] + } +} From 023d106185abf62f892bff66faf617eb45777004 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 18:18:34 +0000 Subject: [PATCH 452/533] chore(exports): cleaner resource index imports (#1396) --- src/resources.ts | 1 + 1 file changed, 1 insertion(+) create mode 100644 src/resources.ts diff --git a/src/resources.ts b/src/resources.ts new file mode 100644 index 000000000..b283d5781 --- /dev/null +++ b/src/resources.ts @@ -0,0 +1 @@ +export * from './resources/index'; From 7c3d212b47ee3090f5bbb82dd21026ba532da6e0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 18:28:57 +0000 Subject: [PATCH 453/533] chore(exports): stop using path fallbacks (#1397) --- package.json | 35 +++++++---------------------------- 1 file changed, 7 insertions(+), 28 deletions(-) diff --git a/package.json b/package.json index baddade77..ac540cf0e 100644 --- a/package.json +++ b/package.json @@ -112,38 +112,17 @@ "default": "./dist/index.mjs" }, "./*.mjs": { - "types": [ - "./dist/*.d.ts", - "./dist/*/index.d.ts" - ], - "default": [ - "./dist/*.mjs", - 
"./dist/*/index.mjs" - ] + "types": "./dist/*.d.ts", + "default": "./dist/*.mjs" }, "./*.js": { - "types": [ - "./dist/*.d.ts", - "./dist/*/index.d.ts" - ], - "default": [ - "./dist/*.js", - "./dist/*/index.js" - ] + "types": "./dist/*.d.ts", + "default": "./dist/*.js" }, "./*": { - "types": [ - "./dist/*.d.ts", - "./dist/*/index.d.ts" - ], - "require": [ - "./dist/*.js", - "./dist/*/index.js" - ], - "default": [ - "./dist/*.mjs", - "./dist/*/index.mjs" - ] + "types": "./dist/*.d.ts", + "require": "./dist/*.js", + "default": "./dist/*.mjs" } }, "bin": "./bin/cli", From aefd2675154ff848032a7fec856f0db6ed2ad629 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 20:35:03 +0000 Subject: [PATCH 454/533] feat(api): o1-pro now available through the API (#1398) --- .stats.yml | 2 +- api.md | 2 ++ src/index.ts | 2 ++ src/resources/responses/responses.ts | 6 +++--- src/resources/shared.ts | 27 ++++++++++++++++++++++----- 5 files changed, 30 insertions(+), 9 deletions(-) diff --git a/.stats.yml b/.stats.yml index b03256223..e0b06dc22 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b26121d5df6eb5d3032a45a267473798b15fcfec76dd44a3256cf1238be05fa4.yml diff --git a/api.md b/api.md index fd8482bf2..9b3aec141 100644 --- a/api.md +++ b/api.md @@ -2,6 +2,7 @@ Types: +- AllModels - ChatModel - ComparisonFilter - CompoundFilter @@ -14,6 +15,7 @@ Types: - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText +- ResponsesModel # Completions diff --git a/src/index.ts b/src/index.ts index 34cc3e84d..931894f2f 100644 --- a/src/index.ts +++ b/src/index.ts @@ -508,6 +508,7 @@ export declare namespace OpenAI { 
export { Responses as Responses }; + export type AllModels = API.AllModels; export type ChatModel = API.ChatModel; export type ComparisonFilter = API.ComparisonFilter; export type CompoundFilter = API.CompoundFilter; @@ -520,6 +521,7 @@ export declare namespace OpenAI { export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; export type ResponseFormatText = API.ResponseFormatText; + export type ResponsesModel = API.ResponsesModel; } // ---------------------- Azure ---------------------- diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index b2cd6b56c..b90d415bd 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -327,7 +327,7 @@ export interface Response { * [model guide](https://platform.openai.com/docs/models) to browse and compare * available models. */ - model: (string & {}) | Shared.ChatModel; + model: Shared.ResponsesModel; /** * The object type of this resource - always set to `response`. @@ -1481,7 +1481,7 @@ export interface ResponseFunctionToolCall { */ export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { /** - * The unique ID of the function call tool output. + * The unique ID of the function tool call. */ id: string; } @@ -2679,7 +2679,7 @@ export interface ResponseCreateParamsBase { * [model guide](https://platform.openai.com/docs/models) to browse and compare * available models. */ - model: (string & {}) | Shared.ChatModel; + model: Shared.ResponsesModel; /** * Specify additional output data to include in the model response. Currently diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 5fbdbba6a..2c0fb1c32 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export type AllModels = + | string + | ChatModel + | string + | ChatModel + | 'o1-pro' + | 'o1-pro-2025-03-19' + | 'computer-use-preview' + | 'computer-use-preview-2025-03-11'; + export type ChatModel = | 'o3-mini' | 'o3-mini-2025-01-31' @@ -9,11 +19,6 @@ export type ChatModel = | 'o1-preview-2024-09-12' | 'o1-mini' | 'o1-mini-2024-09-12' - | 'computer-use-preview' - | 'computer-use-preview-2025-02-04' - | 'computer-use-preview-2025-03-11' - | 'gpt-4.5-preview' - | 'gpt-4.5-preview-2025-02-27' | 'gpt-4o' | 'gpt-4o-2024-11-20' | 'gpt-4o-2024-08-06' @@ -23,6 +28,10 @@ export type ChatModel = | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-mini-audio-preview' | 'gpt-4o-mini-audio-preview-2024-12-17' + | 'gpt-4o-search-preview' + | 'gpt-4o-mini-search-preview' + | 'gpt-4o-search-preview-2025-03-11' + | 'gpt-4o-mini-search-preview-2025-03-11' | 'chatgpt-4o-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' @@ -265,3 +274,11 @@ export interface ResponseFormatText { */ type: 'text'; } + +export type ResponsesModel = + | (string & {}) + | ChatModel + | 'o1-pro' + | 'o1-pro-2025-03-19' + | 'computer-use-preview' + | 'computer-use-preview-2025-03-11'; From 20e97a4373711f0380f488477c3888e90d7134ac Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 21:04:38 +0000 Subject: [PATCH 455/533] release: 4.88.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a3649b199..424ace296 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.87.4" + ".": "4.88.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d820d8fcd..e2a73af85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.88.0 (2025-03-19) + +Full Changelog: 
[v4.87.4...v4.88.0](https://github.com/openai/openai-node/compare/v4.87.4...v4.88.0) + +### Features + +* **api:** o1-pro now available through the API ([#1398](https://github.com/openai/openai-node/issues/1398)) ([616a7e9](https://github.com/openai/openai-node/commit/616a7e90e764882cd749a65af8cc6ae8734fc80d)) + + +### Chores + +* **exports:** cleaner resource index imports ([#1396](https://github.com/openai/openai-node/issues/1396)) ([26b0856](https://github.com/openai/openai-node/commit/26b0856cd63846c34b75895a1ea42ceec7908c1a)) +* **exports:** stop using path fallbacks ([#1397](https://github.com/openai/openai-node/issues/1397)) ([d1479c2](https://github.com/openai/openai-node/commit/d1479c23aff68dd46c73fd31896dd2298a6bf140)) +* **internal:** version bump ([#1393](https://github.com/openai/openai-node/issues/1393)) ([7f16c3a](https://github.com/openai/openai-node/commit/7f16c3aa7b1ab36541219c5a0f93fc518733d0e3)) + ## 4.87.4 (2025-03-18) Full Changelog: [v4.87.3...v4.87.4](https://github.com/openai/openai-node/compare/v4.87.3...v4.87.4) diff --git a/jsr.json b/jsr.json index 3e7c40d5f..ed87ee6d0 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.87.4", + "version": "4.88.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index ac540cf0e..471fafc31 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.87.4", + "version": "4.88.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 172c899ea..c56dab45e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.87.4'; // x-release-please-version +export const VERSION = '4.88.0'; // x-release-please-version From d11b13cdf5412f03e551365297a27e610a36edda Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 16:13:23 +0000 Subject: [PATCH 456/533] feat(api): new models for TTS, STT, + new audio features for Realtime (#1407) --- .stats.yml | 4 +- api.md | 18 + src/resources/audio/audio.ts | 17 +- src/resources/audio/index.ts | 6 + src/resources/audio/speech.ts | 10 +- src/resources/audio/transcriptions.ts | 224 +++++++- src/resources/audio/translations.ts | 2 +- src/resources/beta/realtime/index.ts | 5 + src/resources/beta/realtime/realtime.ts | 522 ++++++++++++++++-- src/resources/beta/realtime/sessions.ts | 236 ++++++-- .../beta/realtime/transcription-sessions.ts | 308 +++++++++++ src/resources/chat/completions/completions.ts | 2 +- tests/api-resources/audio/speech.test.ts | 1 + .../audio/transcriptions.test.ts | 6 +- .../realtime/transcription-sessions.test.ts | 22 + 15 files changed, 1247 insertions(+), 136 deletions(-) create mode 100644 src/resources/beta/realtime/transcription-sessions.ts create mode 100644 tests/api-resources/beta/realtime/transcription-sessions.test.ts diff --git a/.stats.yml b/.stats.yml index e0b06dc22..abb937131 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b26121d5df6eb5d3032a45a267473798b15fcfec76dd44a3256cf1238be05fa4.yml +configured_endpoints: 82 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c22f59c66aec7914b6ee653d3098d1c1c8c16c180d2a158e819c8ddbf476f74b.yml diff --git a/api.md b/api.md index 9b3aec141..cf464cf63 100644 --- a/api.md +++ b/api.md @@ -142,7 +142,11 @@ Types: Types: - Transcription +- TranscriptionInclude - TranscriptionSegment +- TranscriptionStreamEvent +- TranscriptionTextDeltaEvent +- TranscriptionTextDoneEvent - TranscriptionVerbose - TranscriptionWord - TranscriptionCreateResponse @@ -306,7 +310,9 @@ Types: - ConversationItemDeleteEvent - 
ConversationItemDeletedEvent - ConversationItemInputAudioTranscriptionCompletedEvent +- ConversationItemInputAudioTranscriptionDeltaEvent - ConversationItemInputAudioTranscriptionFailedEvent +- ConversationItemRetrieveEvent - ConversationItemTruncateEvent - ConversationItemTruncatedEvent - ConversationItemWithReference @@ -343,6 +349,8 @@ Types: - SessionCreatedEvent - SessionUpdateEvent - SessionUpdatedEvent +- TranscriptionSessionUpdate +- TranscriptionSessionUpdatedEvent ### Sessions @@ -355,6 +363,16 @@ Methods: - client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse +### TranscriptionSessions + +Types: + +- TranscriptionSession + +Methods: + +- client.beta.realtime.transcriptionSessions.create({ ...params }) -> TranscriptionSession + ## Assistants Types: diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index b9a7ad4f8..071fe5929 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -7,8 +7,14 @@ import * as TranscriptionsAPI from './transcriptions'; import { Transcription, TranscriptionCreateParams, + TranscriptionCreateParamsNonStreaming, + TranscriptionCreateParamsStreaming, TranscriptionCreateResponse, + TranscriptionInclude, TranscriptionSegment, + TranscriptionStreamEvent, + TranscriptionTextDeltaEvent, + TranscriptionTextDoneEvent, TranscriptionVerbose, TranscriptionWord, Transcriptions, @@ -28,11 +34,12 @@ export class Audio extends APIResource { speech: SpeechAPI.Speech = new SpeechAPI.Speech(this._client); } -export type AudioModel = 'whisper-1'; +export type AudioModel = 'whisper-1' | 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe'; /** * The format of the output, in one of these options: `json`, `text`, `srt`, - * `verbose_json`, or `vtt`. + * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + * the only supported format is `json`. 
*/ export type AudioResponseFormat = 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; @@ -46,11 +53,17 @@ export declare namespace Audio { export { Transcriptions as Transcriptions, type Transcription as Transcription, + type TranscriptionInclude as TranscriptionInclude, type TranscriptionSegment as TranscriptionSegment, + type TranscriptionStreamEvent as TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent, type TranscriptionVerbose as TranscriptionVerbose, type TranscriptionWord as TranscriptionWord, type TranscriptionCreateResponse as TranscriptionCreateResponse, type TranscriptionCreateParams as TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, }; export { diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index 2bbe9e3ab..deed39ede 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -5,11 +5,17 @@ export { Speech, type SpeechModel, type SpeechCreateParams } from './speech'; export { Transcriptions, type Transcription, + type TranscriptionInclude, type TranscriptionSegment, + type TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent, type TranscriptionVerbose, type TranscriptionWord, type TranscriptionCreateResponse, type TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming, } from './transcriptions'; export { Translations, diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 35e82c4c1..4324028d5 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -18,7 +18,7 @@ export class Speech extends APIResource { } } -export type SpeechModel = 'tts-1' | 'tts-1-hd'; +export type SpeechModel = 'tts-1' | 'tts-1-hd' | 
'gpt-4o-mini-tts'; export interface SpeechCreateParams { /** @@ -28,7 +28,7 @@ export interface SpeechCreateParams { /** * One of the available [TTS models](https://platform.openai.com/docs/models#tts): - * `tts-1` or `tts-1-hd` + * `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. */ model: (string & {}) | SpeechModel; @@ -40,6 +40,12 @@ export interface SpeechCreateParams { */ voice: 'alloy' | 'ash' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer'; + /** + * Control the voice of your generated audio with additional instructions. Does not + * work with `tts-1` or `tts-1-hd`. + */ + instructions?: string; + /** * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, * `wav`, and `pcm`. diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 6fbe96b58..7f797c709 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -2,29 +2,42 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; +import * as TranscriptionsAPI from './transcriptions'; import * as AudioAPI from './audio'; +import { Stream } from '../../streaming'; export class Transcriptions extends APIResource { /** * Transcribes audio into the input language. 
*/ create( - body: TranscriptionCreateParams<'json' | undefined>, + body: TranscriptionCreateParamsNonStreaming<'json' | undefined>, options?: Core.RequestOptions, ): Core.APIPromise; create( - body: TranscriptionCreateParams<'verbose_json'>, + body: TranscriptionCreateParamsNonStreaming<'verbose_json'>, options?: Core.RequestOptions, ): Core.APIPromise; create( - body: TranscriptionCreateParams<'srt' | 'vtt' | 'text'>, + body: TranscriptionCreateParamsNonStreaming<'srt' | 'vtt' | 'text'>, options?: Core.RequestOptions, ): Core.APIPromise; - create(body: TranscriptionCreateParams, options?: Core.RequestOptions): Core.APIPromise; + create( + body: TranscriptionCreateParamsNonStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranscriptionCreateParamsStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise>; + create( + body: TranscriptionCreateParamsStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise>; create( body: TranscriptionCreateParams, options?: Core.RequestOptions, - ): Core.APIPromise { + ): Core.APIPromise> { return this._client.post( '/audio/transcriptions', Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), @@ -41,8 +54,36 @@ export interface Transcription { * The transcribed text. */ text: string; + + /** + * The log probabilities of the tokens in the transcription. Only returned with the + * models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added + * to the `include` array. + */ + logprobs?: Array; } +export namespace Transcription { + export interface Logprob { + /** + * The token in the transcription. + */ + token?: string; + + /** + * The bytes of the token. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + +export type TranscriptionInclude = 'logprobs'; + export interface TranscriptionSegment { /** * Unique identifier of the segment. 
@@ -98,6 +139,103 @@ export interface TranscriptionSegment { tokens: Array; } +/** + * Emitted when there is an additional text delta. This is also the first event + * emitted when the transcription starts. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export type TranscriptionStreamEvent = TranscriptionTextDeltaEvent | TranscriptionTextDoneEvent; + +/** + * Emitted when there is an additional text delta. This is also the first event + * emitted when the transcription starts. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export interface TranscriptionTextDeltaEvent { + /** + * The text delta that was additionally transcribed. + */ + delta: string; + + /** + * The type of the event. Always `transcript.text.delta`. + */ + type: 'transcript.text.delta'; + + /** + * The log probabilities of the delta. Only included if you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `include[]` parameter set to `logprobs`. + */ + logprobs?: Array; +} + +export namespace TranscriptionTextDeltaEvent { + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token?: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + +/** + * Emitted when the transcription is complete. Contains the complete transcription + * text. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export interface TranscriptionTextDoneEvent { + /** + * The text that was transcribed. 
+ */ + text: string; + + /** + * The type of the event. Always `transcript.text.done`. + */ + type: 'transcript.text.done'; + + /** + * The log probabilities of the individual tokens in the transcription. Only + * included if you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `include[]` parameter set to `logprobs`. + */ + logprobs?: Array; +} + +export namespace TranscriptionTextDoneEvent { + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token?: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + /** * Represents a verbose json transcription response returned by model, based on the * provided input. @@ -152,7 +290,11 @@ export interface TranscriptionWord { */ export type TranscriptionCreateResponse = Transcription | TranscriptionVerbose; -export interface TranscriptionCreateParams< +export type TranscriptionCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> = TranscriptionCreateParamsNonStreaming | TranscriptionCreateParamsStreaming; + +export interface TranscriptionCreateParamsBase< ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, > { /** @@ -162,11 +304,21 @@ export interface TranscriptionCreateParams< file: Core.Uploadable; /** - * ID of the model to use. Only `whisper-1` (which is powered by our open source - * Whisper V2 model) is currently available. + * ID of the model to use. The options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + * Whisper V2 model). */ model: (string & {}) | AudioAPI.AudioModel; + /** + * Additional information to include in the transcription response. 
`logprobs` will + * return the log probabilities of the tokens in the response to understand the + * model's confidence in the transcription. `logprobs` only works with + * response_format set to `json` and only with the models `gpt-4o-transcribe` and + * `gpt-4o-mini-transcribe`. + */ + include?: Array; + /** * The language of the input audio. Supplying the input language in * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) @@ -184,10 +336,23 @@ export interface TranscriptionCreateParams< /** * The format of the output, in one of these options: `json`, `text`, `srt`, - * `verbose_json`, or `vtt`. + * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + * the only supported format is `json`. */ response_format?: ResponseFormat; + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. + */ + stream?: boolean | null; + /** * The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the * output more random, while lower values like 0.2 will make it more focused and @@ -207,13 +372,54 @@ export interface TranscriptionCreateParams< timestamp_granularities?: Array<'word' | 'segment'>; } +export namespace TranscriptionCreateParams { + export type TranscriptionCreateParamsNonStreaming = TranscriptionsAPI.TranscriptionCreateParamsNonStreaming; + export type TranscriptionCreateParamsStreaming = TranscriptionsAPI.TranscriptionCreateParamsStreaming; +} + +export interface TranscriptionCreateParamsNonStreaming< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> extends TranscriptionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. + */ + stream?: false | null; +} + +export interface TranscriptionCreateParamsStreaming extends TranscriptionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. 
+ */ + stream: true; +} + export declare namespace Transcriptions { export { type Transcription as Transcription, + type TranscriptionInclude as TranscriptionInclude, type TranscriptionSegment as TranscriptionSegment, + type TranscriptionStreamEvent as TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent, type TranscriptionVerbose as TranscriptionVerbose, type TranscriptionWord as TranscriptionWord, type TranscriptionCreateResponse as TranscriptionCreateResponse, type TranscriptionCreateParams as TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, }; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index dac519ede..df312f876 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -88,7 +88,7 @@ export interface TranslationCreateParams< * The format of the output, in one of these options: `json`, `text`, `srt`, * `verbose_json`, or `vtt`. */ - response_format?: ResponseFormat; + response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; /** * The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the diff --git a/src/resources/beta/realtime/index.ts b/src/resources/beta/realtime/index.ts index 66c3ecaae..ba51d8a66 100644 --- a/src/resources/beta/realtime/index.ts +++ b/src/resources/beta/realtime/index.ts @@ -2,3 +2,8 @@ export { Realtime } from './realtime'; export { Sessions, type Session, type SessionCreateResponse, type SessionCreateParams } from './sessions'; +export { + TranscriptionSessions, + type TranscriptionSession, + type TranscriptionSessionCreateParams, +} from './transcription-sessions'; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 5e2b1c833..d0a74840b 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -10,9 +10,17 @@ import { SessionCreateResponse, Sessions, } from './sessions'; +import * as TranscriptionSessionsAPI from './transcription-sessions'; +import { + TranscriptionSession, + TranscriptionSessionCreateParams, + TranscriptionSessions, +} from './transcription-sessions'; export class Realtime extends APIResource { sessions: SessionsAPI.Sessions = new SessionsAPI.Sessions(this._client); + transcriptionSessions: TranscriptionSessionsAPI.TranscriptionSessions = + new TranscriptionSessionsAPI.TranscriptionSessions(this._client); } /** @@ -300,6 +308,91 @@ export interface ConversationItemInputAudioTranscriptionCompletedEvent { * The event type, must be `conversation.item.input_audio_transcription.completed`. */ type: 'conversation.item.input_audio_transcription.completed'; + + /** + * The log probabilities of the transcription. + */ + logprobs?: Array | null; +} + +export namespace ConversationItemInputAudioTranscriptionCompletedEvent { + /** + * A log probability object. + */ + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token: string; + + /** + * The bytes that were used to generate the log probability. 
+ */ + bytes: Array; + + /** + * The log probability of the token. + */ + logprob: number; + } +} + +/** + * Returned when the text value of an input audio transcription content part is + * updated. + */ +export interface ConversationItemInputAudioTranscriptionDeltaEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.delta`. + */ + type: 'conversation.item.input_audio_transcription.delta'; + + /** + * The index of the content part in the item's content array. + */ + content_index?: number; + + /** + * The text delta. + */ + delta?: string; + + /** + * The log probabilities of the transcription. + */ + logprobs?: Array | null; +} + +export namespace ConversationItemInputAudioTranscriptionDeltaEvent { + /** + * A log probability object. + */ + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes: Array; + + /** + * The log probability of the token. + */ + logprob: number; + } } /** @@ -361,6 +454,30 @@ export namespace ConversationItemInputAudioTranscriptionFailedEvent { } } +/** + * Send this event when you want to retrieve the server's representation of a + * specific item in the conversation history. This is useful, for example, to + * inspect user audio after noise cancellation and VAD. The server will respond + * with a `conversation.item.retrieved` event, unless the item does not exist in + * the conversation history, in which case the server will respond with an error. + */ +export interface ConversationItemRetrieveEvent { + /** + * The ID of the item to retrieve. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.retrieve`. 
+ */ + type: 'conversation.item.retrieve'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + /** * Send this event to truncate a previous assistant message’s audio. The server * will produce audio faster than realtime, so this event is useful when the user @@ -789,18 +906,20 @@ export namespace RateLimitsUpdatedEvent { } /** - * All events that the client can send to the Realtime API + * A realtime client event. */ export type RealtimeClientEvent = - | SessionUpdateEvent - | InputAudioBufferAppendEvent - | InputAudioBufferCommitEvent - | InputAudioBufferClearEvent | ConversationItemCreateEvent - | ConversationItemTruncateEvent | ConversationItemDeleteEvent + | ConversationItemRetrieveEvent + | ConversationItemTruncateEvent + | InputAudioBufferAppendEvent + | InputAudioBufferClearEvent + | InputAudioBufferCommitEvent + | ResponseCancelEvent | ResponseCreateEvent - | ResponseCancelEvent; + | SessionUpdateEvent + | TranscriptionSessionUpdate; /** * The response resource. @@ -1009,37 +1128,63 @@ export namespace RealtimeResponseUsage { } /** - * All events that the Realtime API can send back + * A realtime server event. 
*/ export type RealtimeServerEvent = - | ErrorEvent - | SessionCreatedEvent - | SessionUpdatedEvent | ConversationCreatedEvent - | InputAudioBufferCommittedEvent - | InputAudioBufferClearedEvent - | InputAudioBufferSpeechStartedEvent - | InputAudioBufferSpeechStoppedEvent | ConversationItemCreatedEvent + | ConversationItemDeletedEvent | ConversationItemInputAudioTranscriptionCompletedEvent + | ConversationItemInputAudioTranscriptionDeltaEvent | ConversationItemInputAudioTranscriptionFailedEvent + | RealtimeServerEvent.ConversationItemRetrieved | ConversationItemTruncatedEvent - | ConversationItemDeletedEvent + | ErrorEvent + | InputAudioBufferClearedEvent + | InputAudioBufferCommittedEvent + | InputAudioBufferSpeechStartedEvent + | InputAudioBufferSpeechStoppedEvent + | RateLimitsUpdatedEvent + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent | ResponseCreatedEvent | ResponseDoneEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent - | ResponseContentPartAddedEvent - | ResponseContentPartDoneEvent | ResponseTextDeltaEvent | ResponseTextDoneEvent - | ResponseAudioTranscriptDeltaEvent - | ResponseAudioTranscriptDoneEvent - | ResponseAudioDeltaEvent - | ResponseAudioDoneEvent - | ResponseFunctionCallArgumentsDeltaEvent - | ResponseFunctionCallArgumentsDoneEvent - | RateLimitsUpdatedEvent; + | SessionCreatedEvent + | SessionUpdatedEvent + | TranscriptionSessionUpdatedEvent; + +export namespace RealtimeServerEvent { + /** + * Returned when a conversation item is retrieved with + * `conversation.item.retrieve`. + */ + export interface ConversationItemRetrieved { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. 
+ */ + item: RealtimeAPI.ConversationItem; + + /** + * The event type, must be `conversation.item.retrieved`. + */ + type: 'conversation.item.retrieved'; + } +} /** * Returned when the model-generated audio is updated. @@ -1834,15 +1979,24 @@ export namespace SessionUpdateEvent { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: Session.InputAudioTranscription; @@ -1891,7 +2045,8 @@ export namespace SessionUpdateEvent { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. 
Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -1907,9 +2062,16 @@ export namespace SessionUpdateEvent { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ turn_detection?: Session.TurnDetection; @@ -1922,15 +2084,31 @@ export namespace SessionUpdateEvent { } export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. 
`near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ export interface InputAudioTranscription { /** @@ -1941,16 +2119,17 @@ export namespace SessionUpdateEvent { language?: string; /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; /** * An optional text to guide the model's style or continue a previous audio - * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - * should match the audio language. + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). 
+ * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". */ prompt?: string; } @@ -1979,48 +2158,62 @@ export namespace SessionUpdateEvent { } /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. 
*/ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. */ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. + * Type of turn detection. */ - type?: string; + type?: 'server_vad' | 'semantic_vad'; } } } @@ -2046,7 +2239,216 @@ export interface SessionUpdatedEvent { type: 'session.updated'; } +/** + * Send this event to update a transcription session. + */ +export interface TranscriptionSessionUpdate { + /** + * Realtime transcription session object configuration. + */ + session: TranscriptionSessionUpdate.Session; + + /** + * The event type, must be `transcription_session.update`. + */ + type: 'transcription_session.update'; + + /** + * Optional client-generated ID used to identify this event. 
+ */ + event_id?: string; +} + +export namespace TranscriptionSessionUpdate { + /** + * Realtime transcription session object configuration. + */ + export interface Session { + /** + * The set of items to include in the transcription. Current available items are: + * + * - `item.input_audio_transcription.logprobs` + */ + include?: Array; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + input_audio_transcription?: Session.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. 
Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: Session.TurnDetection; + } + + export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. 
+ */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. 
Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. + */ + type?: 'server_vad' | 'semantic_vad'; + } + } +} + +/** + * Returned when a transcription session is updated with a + * `transcription_session.update` event, unless there is an error. + */ +export interface TranscriptionSessionUpdatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * A new Realtime transcription session configuration. + * + * When a session is created on the server via REST API, the session object also + * contains an ephemeral key. Default TTL for keys is one minute. This property is + * not present when a session is updated via the WebSocket API. + */ + session: TranscriptionSessionsAPI.TranscriptionSession; + + /** + * The event type, must be `transcription_session.updated`. 
+ */ + type: 'transcription_session.updated'; +} + Realtime.Sessions = Sessions; +Realtime.TranscriptionSessions = TranscriptionSessions; export declare namespace Realtime { export { @@ -2055,4 +2457,10 @@ export declare namespace Realtime { type SessionCreateResponse as SessionCreateResponse, type SessionCreateParams as SessionCreateParams, }; + + export { + TranscriptionSessions as TranscriptionSessions, + type TranscriptionSession as TranscriptionSession, + type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, + }; } diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index a99c9e045..bae50124e 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -27,7 +27,7 @@ export class Sessions extends APIResource { */ export interface Session { /** - * Unique identifier for the session object. + * Unique identifier for the session that looks like `sess_1234567890abcdef`. */ id?: string; @@ -38,12 +38,24 @@ export interface Session { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. 
+ * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: Session.InputAudioTranscription; @@ -79,7 +91,6 @@ export interface Session { * The Realtime model used for this session. */ model?: - | (string & {}) | 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' @@ -93,7 +104,8 @@ export interface Session { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -109,11 +121,18 @@ export interface Session { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. 
For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ - turn_detection?: Session.TurnDetection | null; + turn_detection?: Session.TurnDetection; /** * The voice the model uses to respond. Voice cannot be changed during the session @@ -124,19 +143,54 @@ export interface Session { } export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs - * asynchronously through Whisper and should be treated as rough guidance rather - * than the representation understood by the model. + * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. 
*/ export interface InputAudioTranscription { /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; } export interface Tool { @@ -163,48 +217,62 @@ export namespace Session { } /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. 
*/ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. */ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. */ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. 
+ * Type of turn detection. */ - type?: 'server_vad'; + type?: 'server_vad' | 'semantic_vad'; } } @@ -394,15 +462,24 @@ export interface SessionCreateParams { */ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: SessionCreateParams.InputAudioNoiseReduction; + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ input_audio_transcription?: SessionCreateParams.InputAudioTranscription; @@ -451,7 +528,8 @@ export interface SessionCreateParams { output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; /** - * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + * Sampling temperature for the model, limited to [0.6, 1.2]. 
For audio models a + * temperature of 0.8 is highly recommended for best performance. */ temperature?: number; @@ -467,9 +545,16 @@ export interface SessionCreateParams { tools?: Array; /** - * Configuration for turn detection. Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ turn_detection?: SessionCreateParams.TurnDetection; @@ -482,15 +567,31 @@ export interface SessionCreateParams { } export namespace SessionCreateParams { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. 
+ */ + type?: 'near_field' | 'far_field'; + } + /** * Configuration for input audio transcription, defaults to off and can be set to * `null` to turn off once on. Input audio transcription is not native to the * model, since the model consumes audio directly. Transcription runs * asynchronously through - * [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) - * and should be treated as rough guidance rather than the representation - * understood by the model. The client can optionally set the language and prompt - * for transcription, these fields will be passed to the Whisper API. + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. */ export interface InputAudioTranscription { /** @@ -501,16 +602,17 @@ export namespace SessionCreateParams { language?: string; /** - * The model to use for transcription, `whisper-1` is the only currently supported - * model. + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. */ model?: string; /** * An optional text to guide the model's style or continue a previous audio - * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) - * should match the audio language. + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". */ prompt?: string; } @@ -539,48 +641,62 @@ export namespace SessionCreateParams { } /** - * Configuration for turn detection. 
Can be set to `null` to turn off. Server VAD - * means that the model will detect the start and end of speech based on audio - * volume and respond at the end of user speech. + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. */ export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. `true` by default. + * occurs. */ create_response?: boolean; + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. `true` by default. + * occurs. */ interrupt_response?: boolean; /** - * Amount of audio to include before the VAD detected speech (in milliseconds). - * Defaults to 300ms. + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. 
*/ prefix_padding_ms?: number; /** - * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. - * With shorter values the model will respond more quickly, but may jump in on - * short pauses from the user. + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. */ silence_duration_ms?: number; /** - * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher - * threshold will require louder audio to activate the model, and thus might - * perform better in noisy environments. + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. */ threshold?: number; /** - * Type of turn detection, only `server_vad` is currently supported. + * Type of turn detection. */ - type?: string; + type?: 'server_vad' | 'semantic_vad'; } } diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts new file mode 100644 index 000000000..d749f8502 --- /dev/null +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -0,0 +1,308 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; + +export class TranscriptionSessions extends APIResource { + /** + * Create an ephemeral API token for use in client-side applications with the + * Realtime API specifically for realtime transcriptions. Can be configured with + * the same session parameters as the `transcription_session.update` client event. 
+ * + * It responds with a session object, plus a `client_secret` key which contains a + * usable ephemeral API token that can be used to authenticate browser clients for + * the Realtime API. + */ + create( + body: TranscriptionSessionCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/realtime/transcription_sessions', { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } +} + +/** + * A new Realtime transcription session configuration. + * + * When a session is created on the server via REST API, the session object also + * contains an ephemeral key. Default TTL for keys is one minute. This property is + * not present when a session is updated via the WebSocket API. + */ +export interface TranscriptionSession { + /** + * Ephemeral key returned by the API. Only present when the session is created on + * the server via REST API. + */ + client_secret: TranscriptionSession.ClientSecret; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: string; + + /** + * Configuration of the transcription model. + */ + input_audio_transcription?: TranscriptionSession.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: TranscriptionSession.TurnDetection; +} + +export namespace TranscriptionSession { + /** + * Ephemeral key returned by the API. Only present when the session is created on + * the server via REST API. + */ + export interface ClientSecret { + /** + * Timestamp for when the token expires. Currently, all tokens expire after one + * minute. 
+ */ + expires_at: number; + + /** + * Ephemeral key usable in client environments to authenticate connections to the + * Realtime API. Use this in client-side environments rather than a standard API + * token, which should only be used server-side. + */ + value: string; + } + + /** + * Configuration of the transcription model. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription. Can be `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, or `whisper-1`. + */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. The + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + * should match the audio language. + */ + prompt?: string; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. 
+ */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export interface TranscriptionSessionCreateParams { + /** + * The set of items to include in the transcription. Current available items are: + * + * - `item.input_audio_transcription.logprobs` + */ + include?: Array; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. 
Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: TranscriptionSessionCreateParams.TurnDetection; +} + +export namespace TranscriptionSessionCreateParams { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. 
+ */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + /** + * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjuction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. 
Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. + */ + type?: 'server_vad' | 'semantic_vad'; + } +} + +export declare namespace TranscriptionSessions { + export { + type TranscriptionSession as TranscriptionSession, + type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, + }; +} diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index f54c01597..08bf7f8db 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -383,7 +383,7 @@ export interface ChatCompletionChunk { * **NOTE:** If the stream is interrupted or cancelled, you may not receive the * final usage chunk which contains the total token usage for the request. 
*/ - usage?: CompletionsAPI.CompletionUsage; + usage?: CompletionsAPI.CompletionUsage | null; } export namespace ChatCompletionChunk { diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index 904d75e5d..cbec6cfac 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -14,6 +14,7 @@ describe('resource speech', () => { input: 'input', model: 'string', voice: 'alloy', + instructions: 'instructions', response_format: 'mp3', speed: 0.25, }); diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index 86ef5e576..2297677b4 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -12,7 +12,7 @@ describe('resource transcriptions', () => { test('create: only required params', async () => { const responsePromise = client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'whisper-1', + model: 'gpt-4o-transcribe', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -26,10 +26,12 @@ describe('resource transcriptions', () => { test('create: required and optional params', async () => { const response = await client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'whisper-1', + model: 'gpt-4o-transcribe', + include: ['logprobs'], language: 'language', prompt: 'prompt', response_format: 'json', + stream: false, temperature: 0, timestamp_granularities: ['word'], }); diff --git a/tests/api-resources/beta/realtime/transcription-sessions.test.ts b/tests/api-resources/beta/realtime/transcription-sessions.test.ts new file mode 100644 index 000000000..d52ce2403 --- /dev/null +++ b/tests/api-resources/beta/realtime/transcription-sessions.test.ts @@ -0,0 +1,22 @@ +// File generated from our OpenAPI spec by 
Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource transcriptionSessions', () => { + test('create', async () => { + const responsePromise = client.beta.realtime.transcriptionSessions.create({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); +}); From ec5067deba1fe4202d90db42e45c3bd774936af1 Mon Sep 17 00:00:00 2001 From: Kevin Whinnery Date: Thu, 20 Mar 2025 11:34:48 -0500 Subject: [PATCH 457/533] feat: add audio helpers --- examples/speech-to-text.ts | 19 +++++ examples/text-to-speech.ts | 23 ++++++ src/helpers/audio.ts | 145 +++++++++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+) create mode 100644 examples/speech-to-text.ts create mode 100644 examples/text-to-speech.ts create mode 100644 src/helpers/audio.ts diff --git a/examples/speech-to-text.ts b/examples/speech-to-text.ts new file mode 100644 index 000000000..f2eb60b4d --- /dev/null +++ b/examples/speech-to-text.ts @@ -0,0 +1,19 @@ +import OpenAI from 'openai'; +import { recordAudio } from 'openai/helpers/audio'; + +const openai = new OpenAI(); + +async function main(): Promise { + console.log('Recording for 5 seconds...'); + const response = await recordAudio({ timeout: 5000, device: 4 }); + + console.log('Transcribing...'); + const transcription = await openai.audio.transcriptions.create({ + file: response, + model: 'whisper-1', + }); + + console.log(transcription.text); +} + +main().catch(console.error); diff --git a/examples/text-to-speech.ts 
b/examples/text-to-speech.ts new file mode 100644 index 000000000..5a87adf91 --- /dev/null +++ b/examples/text-to-speech.ts @@ -0,0 +1,23 @@ +import OpenAI from 'openai'; +import { playAudio } from 'openai/helpers/audio'; + +const openai = new OpenAI(); + +const exampleText = ` +I see skies of blue and clouds of white +The bright blessed days, the dark sacred nights +And I think to myself +What a wonderful world +`.trim(); + +async function main(): Promise { + const response = await openai.audio.speech.create({ + model: 'tts-1', + voice: 'nova', + input: exampleText, + }); + + await playAudio(response); +} + +main().catch(console.error); diff --git a/src/helpers/audio.ts b/src/helpers/audio.ts new file mode 100644 index 000000000..f1a6ea371 --- /dev/null +++ b/src/helpers/audio.ts @@ -0,0 +1,145 @@ +import { File } from 'formdata-node'; +import { spawn } from 'node:child_process'; +import { Readable } from 'node:stream'; +import { platform, versions } from 'node:process'; +import { Response } from 'openai/_shims'; + +const DEFAULT_SAMPLE_RATE = 24000; +const DEFAULT_CHANNELS = 1; + +const isNode = Boolean(versions?.node); + +const recordingProviders: Record = { + win32: 'dshow', + darwin: 'avfoundation', + linux: 'alsa', + aix: 'alsa', + android: 'alsa', + freebsd: 'alsa', + haiku: 'alsa', + sunos: 'alsa', + netbsd: 'alsa', + openbsd: 'alsa', + cygwin: 'dshow', +}; + +function isResponse(stream: NodeJS.ReadableStream | Response | File): stream is Response { + return typeof (stream as any).body !== 'undefined'; +} + +function isFile(stream: NodeJS.ReadableStream | Response | File): stream is File { + return stream instanceof File; +} + +async function nodejsPlayAudio(stream: NodeJS.ReadableStream | Response | File): Promise { + return new Promise((resolve, reject) => { + try { + const ffplay = spawn('ffplay', ['-autoexit', '-nodisp', '-i', 'pipe:0']); + + if (isResponse(stream)) { + stream.body.pipe(ffplay.stdin); + } else if (isFile(stream)) { + 
Readable.from(stream.stream()).pipe(ffplay.stdin); + } else { + stream.pipe(ffplay.stdin); + } + + ffplay.on('close', (code: number) => { + if (code !== 0) { + reject(new Error(`ffplay process exited with code ${code}`)); + } + resolve(); + }); + } catch (error) { + reject(error); + } + }); +} + +export async function playAudio(input: NodeJS.ReadableStream | Response | File): Promise { + if (isNode) { + return nodejsPlayAudio(input); + } + + throw new Error( + 'Play audio is not supported in the browser yet. Check out https://npm.im/wavtools as an alternative.', + ); +} + +type RecordAudioOptions = { + signal?: AbortSignal; + device?: number; + timeout?: number; +}; + +function nodejsRecordAudio({ signal, device, timeout }: RecordAudioOptions = {}): Promise { + return new Promise((resolve, reject) => { + const data: any[] = []; + const provider = recordingProviders[platform]; + try { + const ffmpeg = spawn( + 'ffmpeg', + [ + '-f', + provider, + '-i', + `:${device ?? 0}`, // default audio input device; adjust as needed + '-ar', + DEFAULT_SAMPLE_RATE.toString(), + '-ac', + DEFAULT_CHANNELS.toString(), + '-f', + 'wav', + 'pipe:1', + ], + { + stdio: ['ignore', 'pipe', 'pipe'], + }, + ); + + ffmpeg.stdout.on('data', (chunk) => { + data.push(chunk); + }); + + ffmpeg.on('error', (error) => { + console.error(error); + reject(error); + }); + + ffmpeg.on('close', (code) => { + returnData(); + }); + + function returnData() { + const audioBuffer = Buffer.concat(data); + const audioFile = new File([audioBuffer], 'audio.wav', { type: 'audio/wav' }); + resolve(audioFile); + } + + if (typeof timeout === 'number' && timeout > 0) { + const internalSignal = AbortSignal.timeout(timeout); + internalSignal.addEventListener('abort', () => { + ffmpeg.kill('SIGTERM'); + }); + } + + if (signal) { + signal.addEventListener('abort', () => { + ffmpeg.kill('SIGTERM'); + }); + } + } catch (error) { + reject(error); + } + }); +} + +export async function recordAudio(options: RecordAudioOptions = 
{}) { + if (isNode) { + return nodejsRecordAudio(options); + } + + throw new Error( + 'Record audio is not supported in the browser. Check out https://npm.im/wavtools as an alternative.', + ); +} From 4b0d0392cc030e33d3889dfd42382c66df5910da Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 16:35:21 +0000 Subject: [PATCH 458/533] release: 4.89.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 424ace296..c77dd18b0 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.88.0" + ".": "4.89.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e2a73af85..4597d6e56 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.89.0 (2025-03-20) + +Full Changelog: [v4.88.0...v4.89.0](https://github.com/openai/openai-node/compare/v4.88.0...v4.89.0) + +### Features + +* add audio helpers ([ea1b6b4](https://github.com/openai/openai-node/commit/ea1b6b4ef38813af568b3662037519da9404b80e)) +* **api:** new models for TTS, STT, + new audio features for Realtime ([#1407](https://github.com/openai/openai-node/issues/1407)) ([142933a](https://github.com/openai/openai-node/commit/142933ae70d06045dbf4661cd72c7fa35ae7903d)) + + +### Chores + +* **internal:** version bump ([#1400](https://github.com/openai/openai-node/issues/1400)) ([6838ab4](https://github.com/openai/openai-node/commit/6838ab4268c7c0e083e7be21ef1a51bdea0f0b57)) + ## 4.88.0 (2025-03-19) Full Changelog: [v4.87.4...v4.88.0](https://github.com/openai/openai-node/compare/v4.87.4...v4.88.0) diff --git a/jsr.json b/jsr.json index ed87ee6d0..3e7fdb744 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.88.0", + "version": 
"4.89.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 471fafc31..a77975fda 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.88.0", + "version": "4.89.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c56dab45e..dab92ced6 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.88.0'; // x-release-please-version +export const VERSION = '4.89.0'; // x-release-please-version From 1ed4288c7b9ca8fcb00e524bc6f39c255c6661c5 Mon Sep 17 00:00:00 2001 From: Khai Tran Date: Mon, 24 Mar 2025 16:13:16 -0700 Subject: [PATCH 459/533] chore: update next to 14.2.25 for CVE-2025-29927 --- ecosystem-tests/vercel-edge/package-lock.json | 119 +- ecosystem-tests/vercel-edge/package.json | 2 +- examples/package-lock.json | 2007 +++++++++++++++++ examples/package.json | 2 +- 4 files changed, 2080 insertions(+), 50 deletions(-) create mode 100644 examples/package-lock.json diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index bc820a010..541213a8d 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "14.1.1", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -1180,17 +1180,19 @@ } }, "node_modules/@next/env": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.1.tgz", - "integrity": "sha512-7CnQyD5G8shHxQIIg3c7/pSeYFeMhsNbpU/bmvH7ZnDql7mNRgg8O2JZrhrc/soFnfBnKP4/xXNiiSIPn2w8gA==" + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": 
"sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "license": "MIT" }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.1.tgz", - "integrity": "sha512-yDjSFKQKTIjyT7cFv+DqQfW5jsD+tVxXTckSe1KIouKk75t1qZmj/mV3wzdmFb0XHVGtyRjDMulfVG8uCKemOQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "darwin" @@ -1200,12 +1202,13 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.1.tgz", - "integrity": "sha512-KCQmBL0CmFmN8D64FHIZVD9I4ugQsDBBEJKiblXGgwn7wBCSe8N4Dx47sdzl4JAg39IkSN5NNrr8AniXLMb3aw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "darwin" @@ -1215,12 +1218,13 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.1.tgz", - "integrity": "sha512-YDQfbWyW0JMKhJf/T4eyFr4b3tceTorQ5w2n7I0mNVTFOvu6CGEzfwT3RSAQGTi/FFMTFcuspPec/7dFHuP7Eg==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ 
-1230,12 +1234,13 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.1.tgz", - "integrity": "sha512-fiuN/OG6sNGRN/bRFxRvV5LyzLB8gaL8cbDH5o3mEiVwfcMzyE5T//ilMmaTrnA8HLMS6hoz4cHOu6Qcp9vxgQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1245,12 +1250,13 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.1.tgz", - "integrity": "sha512-rv6AAdEXoezjbdfp3ouMuVqeLjE1Bin0AuE6qxE6V9g3Giz5/R3xpocHoAi7CufRR+lnkuUjRBn05SYJ83oKNQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1260,12 +1266,13 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.1.tgz", - "integrity": "sha512-YAZLGsaNeChSrpz/G7MxO3TIBLaMN8QWMr3X8bt6rCvKovwU7GqQlDu99WdvF33kI8ZahvcdbFsy4jAFzFX7og==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -1275,12 +1282,13 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - 
"version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.1.tgz", - "integrity": "sha512-1L4mUYPBMvVDMZg1inUYyPvFSduot0g73hgfD9CODgbr4xiTYe0VOMTZzaRqYJYBA9mana0x4eaAaypmWo1r5A==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1290,12 +1298,13 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.1.tgz", - "integrity": "sha512-jvIE9tsuj9vpbbXlR5YxrghRfMuG0Qm/nZ/1KDHc+y6FpnZ/apsgh+G6t15vefU0zp3WSpTMIdXRUsNl/7RSuw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", "cpu": [ "ia32" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1305,12 +1314,13 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.1.tgz", - "integrity": "sha512-S6K6EHDU5+1KrBDLko7/c1MNy/Ya73pIAmvKeFwsF4RmBFJSO7/7YeD4FnZ4iBdzE69PpQ4sOMU9ORKeNuxe8A==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -1418,11 +1428,19 @@ "@sinonjs/commons": "^3.0.0" } }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": 
"https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, "node_modules/@swc/helpers": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", - "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "license": "Apache-2.0", "dependencies": { + "@swc/counter": "^0.1.3", "tslib": "^2.4.0" } }, @@ -5061,12 +5079,13 @@ "dev": true }, "node_modules/next": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/next/-/next-14.1.1.tgz", - "integrity": "sha512-McrGJqlGSHeaz2yTRPkEucxQKe5Zq7uPwyeHNmJaZNY4wx9E9QdxmTp310agFRoMuIYgQrCrT3petg13fSVOww==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "license": "MIT", "dependencies": { - "@next/env": "14.1.1", - "@swc/helpers": "0.5.2", + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", "graceful-fs": "^4.2.11", @@ -5080,18 +5099,19 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.1.1", - "@next/swc-darwin-x64": "14.1.1", - "@next/swc-linux-arm64-gnu": "14.1.1", - "@next/swc-linux-arm64-musl": "14.1.1", - "@next/swc-linux-x64-gnu": "14.1.1", - "@next/swc-linux-x64-musl": "14.1.1", - "@next/swc-win32-arm64-msvc": "14.1.1", - "@next/swc-win32-ia32-msvc": "14.1.1", - "@next/swc-win32-x64-msvc": "14.1.1" + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": 
"14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": "14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", "react": "^18.2.0", "react-dom": "^18.2.0", "sass": "^1.3.0" @@ -5100,6 +5120,9 @@ "@opentelemetry/api": { "optional": true }, + "@playwright/test": { + "optional": true + }, "sass": { "optional": true } diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 4c75dd4fd..5a8fea816 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "14.1.1", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, diff --git a/examples/package-lock.json b/examples/package-lock.json new file mode 100644 index 000000000..6feb8c5f4 --- /dev/null +++ b/examples/package-lock.json @@ -0,0 +1,2007 @@ +{ + "name": "openai-examples", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "openai-examples", + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "@azure/identity": "^4.2.0", + "dotenv": "^16.4.7", + "express": "^4.18.2", + "next": "^14.2.25", + "openai": "file:..", + "zod-to-json-schema": "^3.21.4" + }, + "devDependencies": { + "@types/body-parser": "^1.19.3", + "@types/express": "^4.17.19", + "@types/web": "^0.0.194" + } + }, + "..": { + "name": "openai", + "version": "4.89.0", + "license": "Apache-2.0", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7" + }, + "bin": { + "openai": "bin/cli" + }, 
+ "devDependencies": { + "@swc/core": "^1.3.102", + "@swc/jest": "^0.2.29", + "@types/jest": "^29.4.0", + "@types/ws": "^8.5.13", + "@typescript-eslint/eslint-plugin": "^6.7.0", + "@typescript-eslint/parser": "^6.7.0", + "eslint": "^8.49.0", + "eslint-plugin-prettier": "^5.0.1", + "eslint-plugin-unused-imports": "^3.0.0", + "fast-check": "^3.22.0", + "iconv-lite": "^0.6.3", + "jest": "^29.4.0", + "prettier": "^3.0.0", + "prettier-2": "npm:prettier@^2", + "ts-jest": "^29.1.0", + "ts-node": "^10.5.0", + "tsc-multi": "^1.1.0", + "tsconfig-paths": "^4.0.0", + "typescript": "^4.8.2", + "ws": "^8.18.0", + "zod": "^3.23.8" + }, + "peerDependencies": { + "ws": "^8.18.0", + "zod": "^3.23.8" + }, + "peerDependenciesMeta": { + "ws": { + "optional": true + }, + "zod": { + "optional": true + } + } + }, + "node_modules/@azure/abort-controller": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.2.tgz", + "integrity": "sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-auth": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@azure/core-auth/-/core-auth-1.9.0.tgz", + "integrity": "sha512-FPwHpZywuyasDSLMqJ6fhbOK3TqUdviZNF8OqRGA4W5Ewib2lEEZ+pBsYcBa88B2NGO/SEnYPGhyBqNlE8ilSw==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-util": "^1.11.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-client": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@azure/core-client/-/core-client-1.9.3.tgz", + "integrity": "sha512-/wGw8fJ4mdpJ1Cum7s1S+VQyXt1ihwKLzfabS1O/RDADnmzVc01dHn44qD0BvGH6KlZNzOMW95tEpKqhkCChPA==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + 
"@azure/core-auth": "^1.4.0", + "@azure/core-rest-pipeline": "^1.9.1", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.6.1", + "@azure/logger": "^1.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-rest-pipeline": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/@azure/core-rest-pipeline/-/core-rest-pipeline-1.19.1.tgz", + "integrity": "sha512-zHeoI3NCs53lLBbWNzQycjnYKsA1CVKlnzSNuSFcUDwBp8HHVObePxrM7HaX+Ha5Ks639H7chNC9HOaIhNS03w==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.8.0", + "@azure/core-tracing": "^1.0.1", + "@azure/core-util": "^1.11.0", + "@azure/logger": "^1.0.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-tracing": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@azure/core-tracing/-/core-tracing-1.2.0.tgz", + "integrity": "sha512-UKTiEJPkWcESPYJz3X5uKRYyOcJD+4nYph+KpfdPRnQJVrZfk0KJgdnaAWKfhsBBtAf/D58Az4AvCJEmWgIBAg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-util": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/@azure/core-util/-/core-util-1.11.0.tgz", + "integrity": "sha512-DxOSLua+NdpWoSqULhjDyAZTXFdP/LKkqtYuxxz1SCN289zk3OG8UOpnCQAz/tygyACBtWp/BoO72ptK7msY8g==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/identity": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@azure/identity/-/identity-4.8.0.tgz", + "integrity": "sha512-l9ALUGHtFB/JfsqmA+9iYAp2a+cCwdNO/cyIr2y7nJLJsz1aae6qVP8XxT7Kbudg0IQRSIMXj0+iivFdbD1xPA==", + "license": "MIT", + "dependencies": { + 
"@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.9.0", + "@azure/core-client": "^1.9.2", + "@azure/core-rest-pipeline": "^1.17.0", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.11.0", + "@azure/logger": "^1.0.0", + "@azure/msal-browser": "^4.2.0", + "@azure/msal-node": "^3.2.3", + "events": "^3.0.0", + "jws": "^4.0.0", + "open": "^10.1.0", + "stoppable": "^1.1.0", + "tslib": "^2.2.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/logger": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@azure/logger/-/logger-1.1.4.tgz", + "integrity": "sha512-4IXXzcCdLdlXuCG+8UKEwLA1T1NHqUfanhXYHiQTn+6sfWCZXduqbtXDGceg3Ce5QxTGo7EqmbV6Bi+aqKuClQ==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/msal-browser": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@azure/msal-browser/-/msal-browser-4.8.0.tgz", + "integrity": "sha512-z7kJlMW3IAETyq82LDKJqr++IeOvU728q9lkuTFjEIPUWxnB1OlmuPCF32fYurxOnOnJeFEZxjbEzq8xyP0aag==", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "15.3.0" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-common": { + "version": "15.3.0", + "resolved": "https://registry.npmjs.org/@azure/msal-common/-/msal-common-15.3.0.tgz", + "integrity": "sha512-lh+eZfibGwtQxFnx+mj6cYWn0pwA8tDnn8CBs9P21nC7Uw5YWRwfXaXdVQSMENZ5ojRqR+NzRaucEo4qUvs3pA==", + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-node": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@azure/msal-node/-/msal-node-3.4.0.tgz", + "integrity": "sha512-b4wBaPV68i+g61wFOfl5zh1lQ9UylgCQpI2638pJHV0SINneO78hOFdnX8WCoGw5OOc4Eewth9pYOg7gaiyUYw==", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "15.3.0", + "jsonwebtoken": "^9.0.0", + "uuid": "^8.3.0" + }, + "engines": { + "node": ">=16" + } + }, + 
"node_modules/@next/env": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "license": "MIT" + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + 
"node_modules/@next/swc-linux-x64-gnu": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": 
"sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, + "node_modules/@swc/helpers": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3", + "tslib": "^2.4.0" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.5", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", + "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", + "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": 
"^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.19.6", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.6.tgz", + "integrity": "sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/http-errors": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", + "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.13.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.13.tgz", + "integrity": "sha512-ClsL5nMwKaBRwPcCvH8E7+nU4GxHVx1axNvMZTFHMEfNI7oahimt26P5zjVCRrjiIWj6YFXfE1v3dEp94wLcGQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.20.0" + } + }, + "node_modules/@types/qs": { + "version": "6.9.18", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.18.tgz", + "integrity": "sha512-kK7dgTYDyGqS+e2Q4aK9X3D7q234CIZ1Bv0q/7Z5IwRDoADNU81xXJK/YVyLbLTZCoIwUoDoffFeF+p/eIklAA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": 
"sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/send": { + "version": "0.17.4", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", + "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.7", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", + "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "*" + } + }, + "node_modules/@types/web": { + "version": "0.0.194", + "resolved": "https://registry.npmjs.org/@types/web/-/web-0.0.194.tgz", + "integrity": "sha512-VKseTFF3Y8SNbpZqdVFNWQ677ujwNyrI9LcySEUwZX5iebbcdE235Lq/vqrfCzj1oFsXyVUUBqq4x8enXSakMA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + "node_modules/body-parser": { + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.13.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", + "license": "BSD-3-Clause" + }, + "node_modules/bundle-name": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz", + "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", + "license": "MIT", + "dependencies": { + "run-applescript": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, 
+ "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001707", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001707.tgz", + "integrity": "sha512-3qtRjw/HQSMlDWf+X79N206fepf4SOOU6SQLMaq/0KkZLmSjPxAkBOQQ+FxbHKfHmYLZFfdWsO3KA90ceHPSnw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": 
"sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/default-browser": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.2.1.tgz", + "integrity": 
"sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==", + "license": "MIT", + "dependencies": { + "bundle-name": "^4.1.0", + "default-browser-id": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser-id": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.0.tgz", + "integrity": "sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-lazy-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", + "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/dotenv": { + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": 
"sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com/" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": 
"1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/express": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.3", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.7.1", + 
"cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.3.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.12", + "proxy-addr": "~2.0.7", + "qs": "6.13.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.19.0", + "serve-static": "1.16.2", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/finalhandler": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": 
"sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/http-proxy-agent/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": 
"sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/http-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/https-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": 
{ + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz", + "integrity": "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", + "license": "MIT", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + 
"node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT", + "peer": true + }, + "node_modules/jsonwebtoken": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.2.tgz", + "integrity": "sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==", + "license": "MIT", + "dependencies": { + "jws": "^3.2.2", + "lodash.includes": "^4.3.0", + "lodash.isboolean": "^3.0.3", + "lodash.isinteger": "^4.0.4", + "lodash.isnumber": "^3.0.3", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.once": "^4.0.0", + "ms": "^2.1.1", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/jsonwebtoken/node_modules/jwa": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", + "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jsonwebtoken/node_modules/jws": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", + "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", + "license": "MIT", + "dependencies": { + "jwa": "^1.4.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jsonwebtoken/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/jwa": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", + "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", + "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", + "license": "MIT", + "dependencies": { + "jwa": "^2.0.0", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/lodash.includes": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", + "integrity": "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==", + "license": "MIT" + }, + "node_modules/lodash.isboolean": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", + "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==", + "license": "MIT" + }, + "node_modules/lodash.isinteger": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", + "integrity": "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==", + "license": "MIT" + }, + "node_modules/lodash.isnumber": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", + "integrity": "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==", + 
"license": "MIT" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==", + "license": "MIT" + }, + "node_modules/lodash.isstring": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==", + "license": "MIT" + }, + "node_modules/lodash.once": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", + "integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==", + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "peer": true, + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + 
"version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": 
"https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/next": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "license": "MIT", + "dependencies": { + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", + "postcss": "8.4.31", + "styled-jsx": "5.1.1" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": ">=18.17.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": "14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": "14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "sass": { + 
"optional": true + } + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/open": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/open/-/open-10.1.0.tgz", + "integrity": "sha512-mnkeQ1qP5Ue2wd+aivTD3NHd/lZ96Lu0jgf0pwktLPtx6cTZiH7tyeGRRHs0zX0rbrahXPnXlUnbeXyaBBuIaw==", + "license": "MIT", + "dependencies": { + "default-browser": "^5.2.1", + "define-lazy-prop": "^3.0.0", + "is-inside-container": "^1.0.0", + "is-wsl": "^3.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/openai": { + "resolved": "..", + "link": true + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" + }, + 
"node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": 
"sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/run-applescript": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.0.0.tgz", + "integrity": "sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": 
"sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + 
"node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/serve-static": { + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.19.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/stoppable": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stoppable/-/stoppable-1.1.0.tgz", + "integrity": "sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw==", + "license": "MIT", + "engines": { + "node": ">=4", + "npm": ">=6" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", + "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": 
"sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/undici-types": { + "version": "6.20.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", + "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "dev": true, + "license": "MIT" + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + 
"engines": { + "node": ">= 0.8" + } + }, + "node_modules/zod": { + "version": "3.24.2", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz", + "integrity": "sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-to-json-schema": { + "version": "3.24.5", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.5.tgz", + "integrity": "sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==", + "license": "ISC", + "peerDependencies": { + "zod": "^3.24.1" + } + } + } +} diff --git a/examples/package.json b/examples/package.json index 70ec2c523..db01a2c10 100644 --- a/examples/package.json +++ b/examples/package.json @@ -9,7 +9,7 @@ "@azure/identity": "^4.2.0", "dotenv": "^16.4.7", "express": "^4.18.2", - "next": "^14.1.1", + "next": "^14.2.25", "openai": "file:..", "zod-to-json-schema": "^3.21.4" }, From 22a4d95f8be418827b4b13280c2d7f976bd1ad42 Mon Sep 17 00:00:00 2001 From: Khai Tran Date: Mon, 24 Mar 2025 16:18:28 -0700 Subject: [PATCH 460/533] Update next to patch CVE-2025-29927 --- ecosystem-tests/vercel-edge/package-lock.json | 624 +++++++++++++++--- ecosystem-tests/vercel-edge/package.json | 2 +- 2 files changed, 542 insertions(+), 84 deletions(-) diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index 541213a8d..770dc460a 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "^14.2.25", + "next": "^15.2.3", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -777,6 +777,16 @@ "node": ">=16" } }, + "node_modules/@emnapi/runtime": { + "version": "1.3.1", + "resolved": 
"https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.3.1.tgz", + "integrity": "sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/@hapi/hoek": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -792,6 +802,367 @@ "@hapi/hoek": "^9.0.0" } }, + "node_modules/@img/sharp-darwin-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz", + "integrity": "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-arm64": "1.0.4" + } + }, + "node_modules/@img/sharp-darwin-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz", + "integrity": "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-libvips-darwin-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz", + "integrity": "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==", + "cpu": [ + "arm64" 
+ ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-darwin-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz", + "integrity": "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz", + "integrity": "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==", + "cpu": [ + "arm" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz", + "integrity": "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-s390x": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz", + "integrity": "sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==", + "cpu": [ + "s390x" + ], + "license": "LGPL-3.0-or-later", 
+ "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz", + "integrity": "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.0.4.tgz", + "integrity": "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==", + "cpu": [ + "arm64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.0.4.tgz", + "integrity": "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==", + "cpu": [ + "x64" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-linux-arm": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz", + "integrity": "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==", + "cpu": [ + "arm" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + 
"engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm": "1.0.5" + } + }, + "node_modules/@img/sharp-linux-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz", + "integrity": "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm64": "1.0.4" + } + }, + "node_modules/@img/sharp-linux-s390x": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz", + "integrity": "sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==", + "cpu": [ + "s390x" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-s390x": "1.0.4" + } + }, + "node_modules/@img/sharp-linux-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz", + "integrity": "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + 
"@img/sharp-libvips-linux-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-linuxmusl-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.33.5.tgz", + "integrity": "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==", + "cpu": [ + "arm64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-arm64": "1.0.4" + } + }, + "node_modules/@img/sharp-linuxmusl-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.33.5.tgz", + "integrity": "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-wasm32": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.33.5.tgz", + "integrity": "sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==", + "cpu": [ + "wasm32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", + "optional": true, + "dependencies": { + "@emnapi/runtime": "^1.2.0" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-ia32": { + "version": "0.33.5", + "resolved": 
"https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz", + "integrity": "sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==", + "cpu": [ + "ia32" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz", + "integrity": "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==", + "cpu": [ + "x64" + ], + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", @@ -1180,15 +1551,15 @@ } }, "node_modules/@next/env": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", - "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/env/-/env-15.2.3.tgz", + "integrity": "sha512-a26KnbW9DFEUsSxAxKBORR/uD9THoYoKbkpFywMN/AFvboTt94b8+g/07T8J6ACsdLag8/PDU60ov4rPxRAixw==", "license": "MIT" }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", - "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", + "version": 
"15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.2.3.tgz", + "integrity": "sha512-uaBhA8aLbXLqwjnsHSkxs353WrRgQgiFjduDpc7YXEU0B54IKx3vU+cxQlYwPCyC8uYEEX7THhtQQsfHnvv8dw==", "cpu": [ "arm64" ], @@ -1202,9 +1573,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", - "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.2.3.tgz", + "integrity": "sha512-pVwKvJ4Zk7h+4hwhqOUuMx7Ib02u3gDX3HXPKIShBi9JlYllI0nU6TWLbPT94dt7FSi6mSBhfc2JrHViwqbOdw==", "cpu": [ "x64" ], @@ -1218,9 +1589,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", - "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.2.3.tgz", + "integrity": "sha512-50ibWdn2RuFFkOEUmo9NCcQbbV9ViQOrUfG48zHBCONciHjaUKtHcYFiCwBVuzD08fzvzkWuuZkd4AqbvKO7UQ==", "cpu": [ "arm64" ], @@ -1234,9 +1605,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", - "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.2.3.tgz", + "integrity": "sha512-2gAPA7P652D3HzR4cLyAuVYwYqjG0mt/3pHSWTCyKZq/N/dJcUAEoNQMyUmwTZWCJRKofB+JPuDVP2aD8w2J6Q==", "cpu": [ "arm64" 
], @@ -1250,9 +1621,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", - "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.2.3.tgz", + "integrity": "sha512-ODSKvrdMgAJOVU4qElflYy1KSZRM3M45JVbeZu42TINCMG3anp7YCBn80RkISV6bhzKwcUqLBAmOiWkaGtBA9w==", "cpu": [ "x64" ], @@ -1266,9 +1637,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", - "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.2.3.tgz", + "integrity": "sha512-ZR9kLwCWrlYxwEoytqPi1jhPd1TlsSJWAc+H/CJHmHkf2nD92MQpSRIURR1iNgA/kuFSdxB8xIPt4p/T78kwsg==", "cpu": [ "x64" ], @@ -1282,9 +1653,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", - "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.2.3.tgz", + "integrity": "sha512-+G2FrDcfm2YDbhDiObDU/qPriWeiz/9cRR0yMWJeTLGGX6/x8oryO3tt7HhodA1vZ8r2ddJPCjtLcpaVl7TE2Q==", "cpu": [ "arm64" ], @@ -1297,26 +1668,10 @@ "node": ">= 10" } }, - "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", - 
"integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", - "cpu": [ - "ia32" - ], - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">= 10" - } - }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", - "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.2.3.tgz", + "integrity": "sha512-gHYS9tc+G2W0ZC8rBL+H6RdtXIyk40uLiaos0yj5US85FNhbFEndMA2nW3z47nzOWiSvXTZ5kBClc3rD0zJg0w==", "cpu": [ "x64" ], @@ -1435,13 +1790,12 @@ "license": "Apache-2.0" }, "node_modules/@swc/helpers": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", - "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "version": "0.5.15", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", + "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==", "license": "Apache-2.0", "dependencies": { - "@swc/counter": "^0.1.3", - "tslib": "^2.4.0" + "tslib": "^2.8.0" } }, "node_modules/@ts-morph/common": { @@ -2669,7 +3023,8 @@ "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", - "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" }, "node_modules/cliui": { "version": "8.0.1", @@ -2720,11 +3075,25 @@ "integrity": 
"sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", "dev": true }, + "node_modules/color": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", + "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-convert": "^2.0.1", + "color-string": "^1.9.0" + }, + "engines": { + "node": ">=12.5.0" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, + "devOptional": true, "dependencies": { "color-name": "~1.1.4" }, @@ -2736,7 +3105,18 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "devOptional": true + }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "license": "MIT", + "optional": true, + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } }, "node_modules/color-support": { "version": "1.1.3", @@ -2918,10 +3298,11 @@ } }, "node_modules/detect-libc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", - "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", - "dev": true, + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": 
"sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "devOptional": true, + "license": "Apache-2.0", "engines": { "node": ">=8" } @@ -3810,7 +4191,8 @@ "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true }, "node_modules/has-flag": { "version": "4.0.0", @@ -5079,41 +5461,42 @@ "dev": true }, "node_modules/next": { - "version": "14.2.25", - "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", - "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "version": "15.2.3", + "resolved": "https://registry.npmjs.org/next/-/next-15.2.3.tgz", + "integrity": "sha512-x6eDkZxk2rPpu46E1ZVUWIBhYCLszmUY6fvHBFcbzJ9dD+qRX6vcHusaqqDlnY+VngKzKbAiG2iRCkPbmi8f7w==", "license": "MIT", "dependencies": { - "@next/env": "14.2.25", - "@swc/helpers": "0.5.5", + "@next/env": "15.2.3", + "@swc/counter": "0.1.3", + "@swc/helpers": "0.5.15", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", - "graceful-fs": "^4.2.11", "postcss": "8.4.31", - "styled-jsx": "5.1.1" + "styled-jsx": "5.1.6" }, "bin": { "next": "dist/bin/next" }, "engines": { - "node": ">=18.17.0" + "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.2.25", - "@next/swc-darwin-x64": "14.2.25", - "@next/swc-linux-arm64-gnu": "14.2.25", - "@next/swc-linux-arm64-musl": "14.2.25", - "@next/swc-linux-x64-gnu": "14.2.25", - "@next/swc-linux-x64-musl": "14.2.25", - "@next/swc-win32-arm64-msvc": "14.2.25", - "@next/swc-win32-ia32-msvc": "14.2.25", - "@next/swc-win32-x64-msvc": "14.2.25" + "@next/swc-darwin-arm64": "15.2.3", + 
"@next/swc-darwin-x64": "15.2.3", + "@next/swc-linux-arm64-gnu": "15.2.3", + "@next/swc-linux-arm64-musl": "15.2.3", + "@next/swc-linux-x64-gnu": "15.2.3", + "@next/swc-linux-x64-musl": "15.2.3", + "@next/swc-win32-arm64-msvc": "15.2.3", + "@next/swc-win32-x64-msvc": "15.2.3", + "sharp": "^0.33.5" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", "@playwright/test": "^1.41.2", - "react": "^18.2.0", - "react-dom": "^18.2.0", + "babel-plugin-react-compiler": "*", + "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "sass": "^1.3.0" }, "peerDependenciesMeta": { @@ -5123,6 +5506,9 @@ "@playwright/test": { "optional": true }, + "babel-plugin-react-compiler": { + "optional": true + }, "sass": { "optional": true } @@ -5815,6 +6201,59 @@ "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", "dev": true }, + "node_modules/sharp": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.33.5.tgz", + "integrity": "sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==", + "hasInstallScript": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "color": "^4.2.3", + "detect-libc": "^2.0.3", + "semver": "^7.6.3" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-darwin-arm64": "0.33.5", + "@img/sharp-darwin-x64": "0.33.5", + "@img/sharp-libvips-darwin-arm64": "1.0.4", + "@img/sharp-libvips-darwin-x64": "1.0.4", + "@img/sharp-libvips-linux-arm": "1.0.5", + "@img/sharp-libvips-linux-arm64": "1.0.4", + "@img/sharp-libvips-linux-s390x": "1.0.4", + "@img/sharp-libvips-linux-x64": "1.0.4", + "@img/sharp-libvips-linuxmusl-arm64": "1.0.4", + "@img/sharp-libvips-linuxmusl-x64": "1.0.4", + "@img/sharp-linux-arm": 
"0.33.5", + "@img/sharp-linux-arm64": "0.33.5", + "@img/sharp-linux-s390x": "0.33.5", + "@img/sharp-linux-x64": "0.33.5", + "@img/sharp-linuxmusl-arm64": "0.33.5", + "@img/sharp-linuxmusl-x64": "0.33.5", + "@img/sharp-wasm32": "0.33.5", + "@img/sharp-win32-ia32": "0.33.5", + "@img/sharp-win32-x64": "0.33.5" + } + }, + "node_modules/sharp/node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "license": "ISC", + "optional": true, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -5848,6 +6287,23 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/simple-swizzle": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", + "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", + "license": "MIT", + "optional": true, + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, + "node_modules/simple-swizzle/node_modules/is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", + "license": "MIT", + "optional": true + }, "node_modules/sisteransi": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", @@ -6079,9 +6535,10 @@ } }, "node_modules/styled-jsx": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", - "integrity": 
"sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz", + "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==", + "license": "MIT", "dependencies": { "client-only": "0.0.1" }, @@ -6089,7 +6546,7 @@ "node": ">= 12.0.0" }, "peerDependencies": { - "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" }, "peerDependenciesMeta": { "@babel/core": { @@ -6386,9 +6843,10 @@ "dev": true }, "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" }, "node_modules/type-detect": { "version": "4.0.8", diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 5a8fea816..420bca941 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "^14.2.25", + "next": "^15.2.3", "react": "18.2.0", "react-dom": "18.2.0" }, From 23fd3ffef3b19656b27576b4d0c613d19ea1ae2f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 17:58:50 +0000 Subject: [PATCH 461/533] fix(client): remove duplicate types (#1410) --- src/resources/shared.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 2c0fb1c32..3e8ded763 100644 --- 
a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,9 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export type AllModels = - | string - | ChatModel - | string + | (string & {}) | ChatModel | 'o1-pro' | 'o1-pro-2025-03-19' From 16e21df0a4d8903ae119a5f7445eafc5031c82a9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 20 Mar 2025 17:49:50 -0400 Subject: [PATCH 462/533] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index abb937131..2df281d34 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c22f59c66aec7914b6ee653d3098d1c1c8c16c180d2a158e819c8ddbf476f74b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml From f395e9584ac63780442bb54c2d292914eaecf3c7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 12:11:17 +0000 Subject: [PATCH 463/533] fix: avoid type error in certain environments (#1413) --- src/core.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core.ts b/src/core.ts index a41eaa3fa..1e1cb0a4a 100644 --- a/src/core.ts +++ b/src/core.ts @@ -430,7 +430,7 @@ export abstract class APIClient { !headers ? {} : Symbol.iterator in headers ? 
Object.fromEntries(Array.from(headers as Iterable).map((header) => [...header])) - : { ...headers } + : { ...(headers as any as Record) } ); } From 06c03d7125d8331679dd206d0e34705d65669046 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 19:08:04 +0000 Subject: [PATCH 464/533] fix(exports): add missing type exports (#1417) --- src/resources/beta/beta.ts | 106 +++++++++++++++++++++++- src/resources/beta/realtime/realtime.ts | 52 ++++++++++++ src/resources/responses/responses.ts | 83 +++++++++++++++++++ 3 files changed, 239 insertions(+), 2 deletions(-) diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 0b909de18..6282d4593 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -22,7 +22,58 @@ import { ThreadStreamEvent, } from './assistants'; import * as RealtimeAPI from './realtime/realtime'; -import { Realtime } from './realtime/realtime'; +import { + ConversationCreatedEvent, + ConversationItem, + ConversationItemContent, + ConversationItemCreateEvent, + ConversationItemCreatedEvent, + ConversationItemDeleteEvent, + ConversationItemDeletedEvent, + ConversationItemInputAudioTranscriptionCompletedEvent, + ConversationItemInputAudioTranscriptionDeltaEvent, + ConversationItemInputAudioTranscriptionFailedEvent, + ConversationItemRetrieveEvent, + ConversationItemTruncateEvent, + ConversationItemTruncatedEvent, + ConversationItemWithReference, + ErrorEvent, + InputAudioBufferAppendEvent, + InputAudioBufferClearEvent, + InputAudioBufferClearedEvent, + InputAudioBufferCommitEvent, + InputAudioBufferCommittedEvent, + InputAudioBufferSpeechStartedEvent, + InputAudioBufferSpeechStoppedEvent, + RateLimitsUpdatedEvent, + Realtime, + RealtimeClientEvent, + RealtimeResponse, + RealtimeResponseStatus, + RealtimeResponseUsage, + RealtimeServerEvent, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + 
ResponseAudioTranscriptDoneEvent, + ResponseCancelEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreateEvent, + ResponseCreatedEvent, + ResponseDoneEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + SessionCreatedEvent, + SessionUpdateEvent, + SessionUpdatedEvent, + TranscriptionSessionUpdate, + TranscriptionSessionUpdatedEvent, +} from './realtime/realtime'; import * as ThreadsAPI from './threads/threads'; import { AssistantResponseFormatOption, @@ -55,7 +106,58 @@ Beta.AssistantsPage = AssistantsPage; Beta.Threads = Threads; export declare namespace Beta { - export { Realtime as Realtime }; + export { + Realtime as Realtime, + type ConversationCreatedEvent as ConversationCreatedEvent, + type ConversationItem as ConversationItem, + type ConversationItemContent as ConversationItemContent, + type ConversationItemCreateEvent as ConversationItemCreateEvent, + type ConversationItemCreatedEvent as ConversationItemCreatedEvent, + type ConversationItemDeleteEvent as ConversationItemDeleteEvent, + type ConversationItemDeletedEvent as ConversationItemDeletedEvent, + type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, + type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, + type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, + type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent, + type ConversationItemTruncateEvent as ConversationItemTruncateEvent, + type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent, + type ConversationItemWithReference as ConversationItemWithReference, + type ErrorEvent as ErrorEvent, + type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent, + type 
InputAudioBufferClearEvent as InputAudioBufferClearEvent, + type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent, + type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent, + type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent, + type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, + type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, + type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent, + type RealtimeClientEvent as RealtimeClientEvent, + type RealtimeResponse as RealtimeResponse, + type RealtimeResponseStatus as RealtimeResponseStatus, + type RealtimeResponseUsage as RealtimeResponseUsage, + type RealtimeServerEvent as RealtimeServerEvent, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCancelEvent as ResponseCancelEvent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreateEvent as ResponseCreateEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseDoneEvent as ResponseDoneEvent, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type SessionCreatedEvent as SessionCreatedEvent, + type SessionUpdateEvent as SessionUpdateEvent, + type SessionUpdatedEvent as SessionUpdatedEvent, + type TranscriptionSessionUpdate as 
TranscriptionSessionUpdate, + type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, + }; export { Chat }; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index d0a74840b..224d94f37 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -2451,6 +2451,58 @@ Realtime.Sessions = Sessions; Realtime.TranscriptionSessions = TranscriptionSessions; export declare namespace Realtime { + export { + type ConversationCreatedEvent as ConversationCreatedEvent, + type ConversationItem as ConversationItem, + type ConversationItemContent as ConversationItemContent, + type ConversationItemCreateEvent as ConversationItemCreateEvent, + type ConversationItemCreatedEvent as ConversationItemCreatedEvent, + type ConversationItemDeleteEvent as ConversationItemDeleteEvent, + type ConversationItemDeletedEvent as ConversationItemDeletedEvent, + type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, + type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, + type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, + type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent, + type ConversationItemTruncateEvent as ConversationItemTruncateEvent, + type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent, + type ConversationItemWithReference as ConversationItemWithReference, + type ErrorEvent as ErrorEvent, + type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent, + type InputAudioBufferClearEvent as InputAudioBufferClearEvent, + type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent, + type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent, + type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent, + type InputAudioBufferSpeechStartedEvent as 
InputAudioBufferSpeechStartedEvent, + type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, + type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent, + type RealtimeClientEvent as RealtimeClientEvent, + type RealtimeResponse as RealtimeResponse, + type RealtimeResponseStatus as RealtimeResponseStatus, + type RealtimeResponseUsage as RealtimeResponseUsage, + type RealtimeServerEvent as RealtimeServerEvent, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCancelEvent as ResponseCancelEvent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreateEvent as ResponseCreateEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseDoneEvent as ResponseDoneEvent, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type SessionCreatedEvent as SessionCreatedEvent, + type SessionUpdateEvent as SessionUpdateEvent, + type SessionUpdatedEvent as SessionUpdatedEvent, + type TranscriptionSessionUpdate as TranscriptionSessionUpdate, + type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, + }; + export { Sessions as Sessions, type SessionsAPISession as Session, diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index b90d415bd..706d66730 100644 --- 
a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -2865,6 +2865,89 @@ export interface ResponseRetrieveParams { Responses.InputItems = InputItems; export declare namespace Responses { + export { + type ComputerTool as ComputerTool, + type EasyInputMessage as EasyInputMessage, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type Response as Response, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent, + type ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent, + type ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent, + type ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent, + type ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent, + type ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall, + type ResponseCompletedEvent as ResponseCompletedEvent, + type ResponseComputerToolCall as ResponseComputerToolCall, + type ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, + type ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot, + type ResponseContent as ResponseContent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseError as ResponseError, + type ResponseErrorEvent as ResponseErrorEvent, + type ResponseFailedEvent as ResponseFailedEvent, + type ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent, 
+ type ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent, + type ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent, + type ResponseFileSearchToolCall as ResponseFileSearchToolCall, + type ResponseFormatTextConfig as ResponseFormatTextConfig, + type ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseFunctionToolCall as ResponseFunctionToolCall, + type ResponseFunctionToolCallItem as ResponseFunctionToolCallItem, + type ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem, + type ResponseFunctionWebSearch as ResponseFunctionWebSearch, + type ResponseInProgressEvent as ResponseInProgressEvent, + type ResponseIncludable as ResponseIncludable, + type ResponseIncompleteEvent as ResponseIncompleteEvent, + type ResponseInput as ResponseInput, + type ResponseInputAudio as ResponseInputAudio, + type ResponseInputContent as ResponseInputContent, + type ResponseInputFile as ResponseInputFile, + type ResponseInputImage as ResponseInputImage, + type ResponseInputItem as ResponseInputItem, + type ResponseInputMessageContentList as ResponseInputMessageContentList, + type ResponseInputMessageItem as ResponseInputMessageItem, + type ResponseInputText as ResponseInputText, + type ResponseItem as ResponseItem, + type ResponseOutputAudio as ResponseOutputAudio, + type ResponseOutputItem as ResponseOutputItem, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseOutputMessage as ResponseOutputMessage, + type ResponseOutputRefusal as ResponseOutputRefusal, + type ResponseOutputText as ResponseOutputText, + type ResponseReasoningItem as ResponseReasoningItem, + type ResponseRefusalDeltaEvent as 
ResponseRefusalDeltaEvent, + type ResponseRefusalDoneEvent as ResponseRefusalDoneEvent, + type ResponseStatus as ResponseStatus, + type ResponseStreamEvent as ResponseStreamEvent, + type ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent, + type ResponseTextConfig as ResponseTextConfig, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type ResponseUsage as ResponseUsage, + type ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent, + type ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent, + type ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent, + type Tool as Tool, + type ToolChoiceFunction as ToolChoiceFunction, + type ToolChoiceOptions as ToolChoiceOptions, + type ToolChoiceTypes as ToolChoiceTypes, + type WebSearchTool as WebSearchTool, + type ResponseCreateParams as ResponseCreateParams, + type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, + type ResponseRetrieveParams as ResponseRetrieveParams, + }; + export { InputItems as InputItems, type ResponseItemList as ResponseItemList, From 7239db018432c10c4c42962b8b3525011d0d375e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 05:07:24 +0000 Subject: [PATCH 465/533] release: 4.89.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index c77dd18b0..05b012220 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.89.0" + ".": "4.89.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 4597d6e56..2bd7f344f 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.89.1 (2025-03-26) + +Full Changelog: [v4.89.0...v4.89.1](https://github.com/openai/openai-node/compare/v4.89.0...v4.89.1) + +### Bug Fixes + +* avoid type error in certain environments ([#1413](https://github.com/openai/openai-node/issues/1413)) ([d3f6f8f](https://github.com/openai/openai-node/commit/d3f6f8f9c7511a98cc5795756fee49a30e44d485)) +* **client:** remove duplicate types ([#1410](https://github.com/openai/openai-node/issues/1410)) ([338878b](https://github.com/openai/openai-node/commit/338878bf484dac5a4fadf50592b1f8d1045cd4b6)) +* **exports:** add missing type exports ([#1417](https://github.com/openai/openai-node/issues/1417)) ([2d15ada](https://github.com/openai/openai-node/commit/2d15ada0e0d81a4e0d097dddbe99be2222c4c0ef)) + + +### Chores + +* **internal:** version bump ([#1408](https://github.com/openai/openai-node/issues/1408)) ([9c0949a](https://github.com/openai/openai-node/commit/9c0949a93c3e181d327f820dbc2a4b0ad77258e9)) + ## 4.89.0 (2025-03-20) Full Changelog: [v4.88.0...v4.89.0](https://github.com/openai/openai-node/compare/v4.88.0...v4.89.0) diff --git a/jsr.json b/jsr.json index 3e7fdb744..393ef104b 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.89.0", + "version": "4.89.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index a77975fda..29c52a1a5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.89.0", + "version": "4.89.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index dab92ced6..c8c72aa23 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.89.0'; // x-release-please-version +export const VERSION = '4.89.1'; // x-release-please-version From 48921aaabc3456408907e4bcf1cc074a9228c459 
Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 17:33:07 +0000 Subject: [PATCH 466/533] chore: add hash of OpenAPI spec/config inputs to .stats.yml --- .stats.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.stats.yml b/.stats.yml index 2df281d34..fe9320429 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml +openapi_spec_hash: 0c255269b89767eae26f4d4dc22d3cbd +config_hash: d36e491b0afc4f79e3afad4b3c9bec70 From 6c93a23b79f335a21c65b52d1192890a5325ed6d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 19:33:55 +0000 Subject: [PATCH 467/533] chore(client): expose headers on some streaming errors (#1423) --- src/streaming.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/streaming.ts b/src/streaming.ts index 25b960314..c9cf2fab8 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -3,6 +3,7 @@ import { OpenAIError } from './error'; import { findDoubleNewlineIndex, LineDecoder } from './internal/decoders/line'; import { ReadableStreamToAsyncIterable } from './internal/stream-utils'; +import { createResponseHeaders } from './core'; import { APIError } from './error'; type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; @@ -53,7 +54,7 @@ export class Stream implements AsyncIterable { } if (data && data.error) { - throw new APIError(undefined, data.error, undefined, undefined); + throw new APIError(undefined, data.error, undefined, createResponseHeaders(response.headers)); } yield data; From fb0e96a7fa8c020ac6109951bb36f9a4ada24d03 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 
20:32:39 +0000 Subject: [PATCH 468/533] chore(api): updates to supported Voice IDs (#1424) --- .stats.yml | 4 +- src/resources/audio/speech.ts | 18 +++++- src/resources/beta/realtime/realtime.ts | 61 +++++++++++++++---- src/resources/beta/realtime/sessions.ts | 47 ++++++++++++-- .../beta/realtime/transcription-sessions.ts | 4 +- src/resources/chat/completions/completions.ts | 14 ++++- src/resources/responses/input-items.ts | 6 ++ src/resources/responses/responses.ts | 20 +++--- tests/api-resources/audio/speech.test.ts | 2 +- .../chat/completions/completions.test.ts | 2 +- .../responses/input-items.test.ts | 2 +- 11 files changed, 143 insertions(+), 37 deletions(-) diff --git a/.stats.yml b/.stats.yml index fe9320429..4d1276a5e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5ad6884898c07591750dde560118baf7074a59aecd1f367f930c5e42b04e848a.yml -openapi_spec_hash: 0c255269b89767eae26f4d4dc22d3cbd +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml +openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e config_hash: d36e491b0afc4f79e3afad4b3c9bec70 diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 4324028d5..4b99ee5f4 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -34,11 +34,23 @@ export interface SpeechCreateParams { /** * The voice to use when generating the audio. Supported voices are `alloy`, `ash`, - * `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the - * voices are available in the + * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + * `verse`. Previews of the voices are available in the * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). 
*/ - voice: 'alloy' | 'ash' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer'; + voice: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; /** * Control the voice of your generated audio with additional instructions. Does not diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 224d94f37..1c02fdd1a 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -1005,9 +1005,22 @@ export interface RealtimeResponse { /** * The voice the model used to respond. Current voice options are `alloy`, `ash`, - * `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + * `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } /** @@ -1620,9 +1633,22 @@ export namespace ResponseCreateEvent { /** * The voice the model uses to respond. Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Response { @@ -2078,9 +2104,22 @@ export namespace SessionUpdateEvent { /** * The voice the model uses to respond. 
Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Session { @@ -2376,7 +2415,7 @@ export namespace TranscriptionSessionUpdate { export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. + * occurs. Not available for transcription sessions. */ create_response?: boolean; @@ -2390,7 +2429,7 @@ export namespace TranscriptionSessionUpdate { /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. + * occurs. Not available for transcription sessions. */ interrupt_response?: boolean; diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index bae50124e..28a44431e 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -139,7 +139,19 @@ export interface Session { * once the model has responded with audio at least once. Current voice options are * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. 
*/ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace Session { @@ -361,7 +373,19 @@ export interface SessionCreateResponse { * once the model has responded with audio at least once. Current voice options are * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace SessionCreateResponse { @@ -561,9 +585,22 @@ export interface SessionCreateParams { /** * The voice the model uses to respond. Voice cannot be changed during the session * once the model has responded with audio at least once. Current voice options are - * `alloy`, `ash`, `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. - */ - voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } export namespace SessionCreateParams { diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts index d749f8502..a54ec1125 100644 --- a/src/resources/beta/realtime/transcription-sessions.ts +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -255,7 +255,7 @@ export namespace TranscriptionSessionCreateParams { export interface TurnDetection { /** * Whether or not to automatically generate a response when a VAD stop event - * occurs. + * occurs. 
Not available for transcription sessions. */ create_response?: boolean; @@ -269,7 +269,7 @@ export namespace TranscriptionSessionCreateParams { /** * Whether or not to automatically interrupt any ongoing response with output to * the default conversation (i.e. `conversation` of `auto`) when a VAD start event - * occurs. + * occurs. Not available for transcription sessions. */ interrupt_response?: boolean; diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 08bf7f8db..f0ef1d0cc 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -325,7 +325,19 @@ export interface ChatCompletionAudioParam { * The voice the model uses to respond. Supported voices are `alloy`, `ash`, * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. */ - voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; + voice: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; } /** diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index f2292e5c6..c88bb441d 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -71,6 +71,12 @@ export interface InputItemListParams extends CursorPageParams { */ before?: string; + /** + * Additional fields to include in the response. See the `include` parameter for + * Response creation above for more information. + */ + include?: Array; + /** * The order to return the input items in. Default is `asc`. * diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 706d66730..6c9f58b43 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -305,8 +305,8 @@ export interface Response { * context. 
* * When using along with `previous_response_id`, the instructions from a previous - * response will be not be carried over to the next response. This makes it simple - * to swap out system (or developer) messages in new responses. + * response will not be carried over to the next response. This makes it simple to + * swap out system (or developer) messages in new responses. */ instructions: string | null; @@ -1356,6 +1356,12 @@ export type ResponseFormatTextConfig = * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). */ export interface ResponseFormatTextJSONSchemaConfig { + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name: string; + /** * The schema for the response format, described as a JSON Schema object. Learn how * to build JSON schemas [here](https://json-schema.org/). @@ -1373,12 +1379,6 @@ export interface ResponseFormatTextJSONSchemaConfig { */ description?: string; - /** - * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores - * and dashes, with a maximum length of 64. - */ - name?: string; - /** * Whether to enable strict schema adherence when generating the output. If set to * true, the model will always follow the exact schema defined in the `schema` @@ -2698,8 +2698,8 @@ export interface ResponseCreateParamsBase { * context. * * When using along with `previous_response_id`, the instructions from a previous - * response will be not be carried over to the next response. This makes it simple - * to swap out system (or developer) messages in new responses. + * response will not be carried over to the next response. This makes it simple to + * swap out system (or developer) messages in new responses. 
*/ instructions?: string | null; diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index cbec6cfac..191c6a313 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -13,7 +13,7 @@ describe('resource speech', () => { const response = await client.audio.speech.create({ input: 'input', model: 'string', - voice: 'alloy', + voice: 'ash', instructions: 'instructions', response_format: 'mp3', speed: 0.25, diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts index eddf252b1..60c23591a 100644 --- a/tests/api-resources/chat/completions/completions.test.ts +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -27,7 +27,7 @@ describe('resource completions', () => { const response = await client.chat.completions.create({ messages: [{ content: 'string', role: 'developer', name: 'name' }], model: 'gpt-4o', - audio: { format: 'wav', voice: 'alloy' }, + audio: { format: 'wav', voice: 'ash' }, frequency_penalty: -2, function_call: 'none', functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts index 51b86f1b3..25ab166c0 100644 --- a/tests/api-resources/responses/input-items.test.ts +++ b/tests/api-resources/responses/input-items.test.ts @@ -32,7 +32,7 @@ describe('resource inputItems', () => { await expect( client.responses.inputItems.list( 'response_id', - { after: 'after', before: 'before', limit: 0, order: 'asc' }, + { after: 'after', before: 'before', include: ['file_search_call.results'], limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); From 16c67be29bbb976660ac01d76f54e8735e71c1e0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: 
Thu, 27 Mar 2025 16:07:24 +0000 Subject: [PATCH 469/533] feat(api): add `get /chat/completions` endpoint --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 4d1276a5e..1e1104a06 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: d36e491b0afc4f79e3afad4b3c9bec70 +config_hash: 9351ea829c2b41da3b48a38c934c92ee From 9c7d352181c690156e26c9538c00edff6db5b384 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 27 Mar 2025 16:56:35 +0000 Subject: [PATCH 470/533] fix(audio): correctly handle transcription streaming --- src/resources/audio/transcriptions.ts | 7 ++++++- src/streaming.ts | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 7f797c709..ba4fec6c5 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -40,7 +40,12 @@ export class Transcriptions extends APIResource { ): Core.APIPromise> { return this._client.post( '/audio/transcriptions', - Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), + Core.multipartFormRequestOptions({ + body, + ...options, + stream: body.stream ?? 
false, + __metadata: { model: body.model }, + }), ); } } diff --git a/src/streaming.ts b/src/streaming.ts index c9cf2fab8..ee25daca6 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -42,7 +42,11 @@ export class Stream implements AsyncIterable { continue; } - if (sse.event === null || sse.event.startsWith('response.')) { + if ( + sse.event === null || + sse.event.startsWith('response.') || + sse.event.startsWith('transcript.') + ) { let data; try { From 84edc62d05eddaefee0973f9687fcfdd43b0afa9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 17:19:39 +0000 Subject: [PATCH 471/533] fix(internal): work around https://github.com/vercel/next.js/issues/76881 (#1427) --- src/_shims/index-deno.ts | 2 ++ src/_shims/index.d.ts | 2 ++ src/_shims/index.js | 6 +++++- src/_shims/index.mjs | 6 +++++- src/core.ts | 5 +++++ 5 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/_shims/index-deno.ts b/src/_shims/index-deno.ts index d9eabb5a9..e83c7a6d3 100644 --- a/src/_shims/index-deno.ts +++ b/src/_shims/index-deno.ts @@ -108,3 +108,5 @@ export declare class FsReadStream extends Readable { const _ReadableStream = ReadableStream; type _ReadableStream = ReadableStream; export { _ReadableStream as ReadableStream }; + +export const init = () => {}; diff --git a/src/_shims/index.d.ts b/src/_shims/index.d.ts index d867b293b..107cf7fd6 100644 --- a/src/_shims/index.d.ts +++ b/src/_shims/index.d.ts @@ -79,3 +79,5 @@ export function fileFromPath(path: string, options?: FileFromPathOptions): Promi export function fileFromPath(path: string, filename?: string, options?: FileFromPathOptions): Promise; export function isFsReadStream(value: any): value is FsReadStream; + +export const init: () => void; diff --git a/src/_shims/index.js b/src/_shims/index.js index b5fc8229e..959f2b9ce 100644 --- a/src/_shims/index.js +++ b/src/_shims/index.js @@ -3,7 +3,9 @@ */ const shims = require('./registry'); 
const auto = require('openai/_shims/auto/runtime'); -if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +exports.init = () => { + if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +}; for (const property of Object.keys(shims)) { Object.defineProperty(exports, property, { get() { @@ -11,3 +13,5 @@ for (const property of Object.keys(shims)) { }, }); } + +exports.init(); diff --git a/src/_shims/index.mjs b/src/_shims/index.mjs index 81665e610..26d7a716c 100644 --- a/src/_shims/index.mjs +++ b/src/_shims/index.mjs @@ -3,5 +3,9 @@ */ import * as shims from './registry.mjs'; import * as auto from 'openai/_shims/auto/runtime'; -if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +export const init = () => { + if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +}; export * from './registry.mjs'; + +init(); diff --git a/src/core.ts b/src/core.ts index 1e1cb0a4a..0dedc53eb 100644 --- a/src/core.ts +++ b/src/core.ts @@ -17,7 +17,12 @@ import { type RequestInit, type Response, type HeadersInit, + init, } from './_shims/index'; + +// try running side effects outside of _shims/index to workaround https://github.com/vercel/next.js/issues/76881 +init(); + export { type Response }; import { BlobLike, isBlobLike, isMultipartBody } from './uploads'; export { From 8ab47e2b569216d5c67a95e5512a3b09cbc7d261 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 17:20:08 +0000 Subject: [PATCH 472/533] release: 4.90.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 21 +++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 05b012220..7b04494d6 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.89.1" + ".": "4.90.0" } diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 2bd7f344f..89523001a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 4.90.0 (2025-03-27) + +Full Changelog: [v4.89.1...v4.90.0](https://github.com/openai/openai-node/compare/v4.89.1...v4.90.0) + +### Features + +* **api:** add `get /chat/completions` endpoint ([2d6710a](https://github.com/openai/openai-node/commit/2d6710a1f9dd4f768d9c73e9c9f5f93c737cdc66)) + + +### Bug Fixes + +* **audio:** correctly handle transcription streaming ([2a9b603](https://github.com/openai/openai-node/commit/2a9b60336cd40a4d4fb9b898ece49170ad648fd0)) +* **internal:** work around https://github.com/vercel/next.js/issues/76881 ([#1427](https://github.com/openai/openai-node/issues/1427)) ([b467e94](https://github.com/openai/openai-node/commit/b467e949476621e8e92587a83c9de6fab35b2b9d)) + + +### Chores + +* add hash of OpenAPI spec/config inputs to .stats.yml ([45db35e](https://github.com/openai/openai-node/commit/45db35e34be560c75bf36224cc153c6d0e6e2a88)) +* **api:** updates to supported Voice IDs ([#1424](https://github.com/openai/openai-node/issues/1424)) ([404f4db](https://github.com/openai/openai-node/commit/404f4db41a2ee651f5bfdaa7b8881e1bf015f058)) +* **client:** expose headers on some streaming errors ([#1423](https://github.com/openai/openai-node/issues/1423)) ([b0783cc](https://github.com/openai/openai-node/commit/b0783cc6221b68f1738e759b393756a7d0e540a3)) + ## 4.89.1 (2025-03-26) Full Changelog: [v4.89.0...v4.89.1](https://github.com/openai/openai-node/compare/v4.89.0...v4.89.1) diff --git a/jsr.json b/jsr.json index 393ef104b..98c8e6959 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.89.1", + "version": "4.90.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 29c52a1a5..408e50a73 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.89.1", + "version": 
"4.90.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c8c72aa23..03d899bdd 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.89.1'; // x-release-please-version +export const VERSION = '4.90.0'; // x-release-please-version From 85175d9644348e61d9779ce0cb15cfef4aa87a1d Mon Sep 17 00:00:00 2001 From: Khai Tran Date: Fri, 28 Mar 2025 09:44:45 -0700 Subject: [PATCH 473/533] update --- ecosystem-tests/vercel-edge/package-lock.json | 625 +++--------------- ecosystem-tests/vercel-edge/package.json | 2 +- 2 files changed, 79 insertions(+), 548 deletions(-) diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index 770dc460a..aaca4370c 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "^15.2.3", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -777,16 +777,6 @@ "node": ">=16" } }, - "node_modules/@emnapi/runtime": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.3.1.tgz", - "integrity": "sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw==", - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, "node_modules/@hapi/hoek": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -802,367 +792,6 @@ "@hapi/hoek": "^9.0.0" } }, - "node_modules/@img/sharp-darwin-arm64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz", - "integrity": "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==", - "cpu": [ - "arm64" 
- ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-darwin-arm64": "1.0.4" - } - }, - "node_modules/@img/sharp-darwin-x64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz", - "integrity": "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==", - "cpu": [ - "x64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-darwin-x64": "1.0.4" - } - }, - "node_modules/@img/sharp-libvips-darwin-arm64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz", - "integrity": "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==", - "cpu": [ - "arm64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "darwin" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-darwin-x64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz", - "integrity": "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==", - "cpu": [ - "x64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "darwin" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linux-arm": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz", - "integrity": "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==", - "cpu": [ - "arm" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linux-arm64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz", - "integrity": "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==", - "cpu": [ - "arm64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linux-s390x": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz", - "integrity": "sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==", - "cpu": [ - "s390x" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linux-x64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz", - "integrity": "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==", - "cpu": [ - "x64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linuxmusl-arm64": { - "version": "1.0.4", - "resolved": 
"https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.0.4.tgz", - "integrity": "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==", - "cpu": [ - "arm64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-libvips-linuxmusl-x64": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.0.4.tgz", - "integrity": "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==", - "cpu": [ - "x64" - ], - "license": "LGPL-3.0-or-later", - "optional": true, - "os": [ - "linux" - ], - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-linux-arm": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz", - "integrity": "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==", - "cpu": [ - "arm" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linux-arm": "1.0.5" - } - }, - "node_modules/@img/sharp-linux-arm64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz", - "integrity": "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==", - "cpu": [ - "arm64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": 
"https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linux-arm64": "1.0.4" - } - }, - "node_modules/@img/sharp-linux-s390x": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz", - "integrity": "sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==", - "cpu": [ - "s390x" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linux-s390x": "1.0.4" - } - }, - "node_modules/@img/sharp-linux-x64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz", - "integrity": "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==", - "cpu": [ - "x64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linux-x64": "1.0.4" - } - }, - "node_modules/@img/sharp-linuxmusl-arm64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.33.5.tgz", - "integrity": "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==", - "cpu": [ - "arm64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linuxmusl-arm64": "1.0.4" - } - }, - "node_modules/@img/sharp-linuxmusl-x64": { - 
"version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.33.5.tgz", - "integrity": "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==", - "cpu": [ - "x64" - ], - "license": "Apache-2.0", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-libvips-linuxmusl-x64": "1.0.4" - } - }, - "node_modules/@img/sharp-wasm32": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.33.5.tgz", - "integrity": "sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==", - "cpu": [ - "wasm32" - ], - "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", - "optional": true, - "dependencies": { - "@emnapi/runtime": "^1.2.0" - }, - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-win32-ia32": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz", - "integrity": "sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==", - "cpu": [ - "ia32" - ], - "license": "Apache-2.0 AND LGPL-3.0-or-later", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - } - }, - "node_modules/@img/sharp-win32-x64": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz", - "integrity": "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==", - "cpu": [ - "x64" - ], - 
"license": "Apache-2.0 AND LGPL-3.0-or-later", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - } - }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", @@ -1551,19 +1180,17 @@ } }, "node_modules/@next/env": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/env/-/env-15.2.3.tgz", - "integrity": "sha512-a26KnbW9DFEUsSxAxKBORR/uD9THoYoKbkpFywMN/AFvboTt94b8+g/07T8J6ACsdLag8/PDU60ov4rPxRAixw==", - "license": "MIT" + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==" }, "node_modules/@next/swc-darwin-arm64": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.2.3.tgz", - "integrity": "sha512-uaBhA8aLbXLqwjnsHSkxs353WrRgQgiFjduDpc7YXEU0B54IKx3vU+cxQlYwPCyC8uYEEX7THhtQQsfHnvv8dw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "darwin" @@ -1573,13 +1200,12 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.2.3.tgz", - "integrity": "sha512-pVwKvJ4Zk7h+4hwhqOUuMx7Ib02u3gDX3HXPKIShBi9JlYllI0nU6TWLbPT94dt7FSi6mSBhfc2JrHViwqbOdw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": 
"sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "darwin" @@ -1589,13 +1215,12 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.2.3.tgz", - "integrity": "sha512-50ibWdn2RuFFkOEUmo9NCcQbbV9ViQOrUfG48zHBCONciHjaUKtHcYFiCwBVuzD08fzvzkWuuZkd4AqbvKO7UQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1605,13 +1230,12 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.2.3.tgz", - "integrity": "sha512-2gAPA7P652D3HzR4cLyAuVYwYqjG0mt/3pHSWTCyKZq/N/dJcUAEoNQMyUmwTZWCJRKofB+JPuDVP2aD8w2J6Q==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", "cpu": [ "arm64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1621,13 +1245,12 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.2.3.tgz", - "integrity": "sha512-ODSKvrdMgAJOVU4qElflYy1KSZRM3M45JVbeZu42TINCMG3anp7YCBn80RkISV6bhzKwcUqLBAmOiWkaGtBA9w==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": 
"sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1637,13 +1260,12 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.2.3.tgz", - "integrity": "sha512-ZR9kLwCWrlYxwEoytqPi1jhPd1TlsSJWAc+H/CJHmHkf2nD92MQpSRIURR1iNgA/kuFSdxB8xIPt4p/T78kwsg==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "linux" @@ -1653,13 +1275,27 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.2.3.tgz", - "integrity": "sha512-+G2FrDcfm2YDbhDiObDU/qPriWeiz/9cRR0yMWJeTLGGX6/x8oryO3tt7HhodA1vZ8r2ddJPCjtLcpaVl7TE2Q==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", "cpu": [ "arm64" ], - "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", + "cpu": [ + "ia32" + ], "optional": true, "os": [ "win32" @@ -1669,13 +1305,12 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "15.2.3", - "resolved": 
"https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.2.3.tgz", - "integrity": "sha512-gHYS9tc+G2W0ZC8rBL+H6RdtXIyk40uLiaos0yj5US85FNhbFEndMA2nW3z47nzOWiSvXTZ5kBClc3rD0zJg0w==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", "cpu": [ "x64" ], - "license": "MIT", "optional": true, "os": [ "win32" @@ -1786,16 +1421,15 @@ "node_modules/@swc/counter": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", - "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", - "license": "Apache-2.0" + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==" }, "node_modules/@swc/helpers": { - "version": "0.5.15", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", - "integrity": "sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==", - "license": "Apache-2.0", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", "dependencies": { - "tslib": "^2.8.0" + "@swc/counter": "^0.1.3", + "tslib": "^2.4.0" } }, "node_modules/@ts-morph/common": { @@ -3023,8 +2657,7 @@ "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", - "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", - "license": "MIT" + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" }, 
"node_modules/cliui": { "version": "8.0.1", @@ -3075,25 +2708,11 @@ "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", "dev": true }, - "node_modules/color": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", - "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", - "license": "MIT", - "optional": true, - "dependencies": { - "color-convert": "^2.0.1", - "color-string": "^1.9.0" - }, - "engines": { - "node": ">=12.5.0" - } - }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "devOptional": true, + "dev": true, "dependencies": { "color-name": "~1.1.4" }, @@ -3105,18 +2724,7 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "devOptional": true - }, - "node_modules/color-string": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", - "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", - "license": "MIT", - "optional": true, - "dependencies": { - "color-name": "^1.0.0", - "simple-swizzle": "^0.2.2" - } + "dev": true }, "node_modules/color-support": { "version": "1.1.3", @@ -3301,7 +2909,7 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", - "devOptional": true, + "dev": true, "license": "Apache-2.0", "engines": { "node": ">=8" @@ -4191,8 
+3799,7 @@ "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" }, "node_modules/has-flag": { "version": "4.0.0", @@ -5461,42 +5068,40 @@ "dev": true }, "node_modules/next": { - "version": "15.2.3", - "resolved": "https://registry.npmjs.org/next/-/next-15.2.3.tgz", - "integrity": "sha512-x6eDkZxk2rPpu46E1ZVUWIBhYCLszmUY6fvHBFcbzJ9dD+qRX6vcHusaqqDlnY+VngKzKbAiG2iRCkPbmi8f7w==", - "license": "MIT", - "dependencies": { - "@next/env": "15.2.3", - "@swc/counter": "0.1.3", - "@swc/helpers": "0.5.15", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "dependencies": { + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", "postcss": "8.4.31", - "styled-jsx": "5.1.6" + "styled-jsx": "5.1.1" }, "bin": { "next": "dist/bin/next" }, "engines": { - "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" + "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "15.2.3", - "@next/swc-darwin-x64": "15.2.3", - "@next/swc-linux-arm64-gnu": "15.2.3", - "@next/swc-linux-arm64-musl": "15.2.3", - "@next/swc-linux-x64-gnu": "15.2.3", - "@next/swc-linux-x64-musl": "15.2.3", - "@next/swc-win32-arm64-msvc": "15.2.3", - "@next/swc-win32-x64-msvc": "15.2.3", - "sharp": "^0.33.5" + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": "14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": 
"14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", "@playwright/test": "^1.41.2", - "babel-plugin-react-compiler": "*", - "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", - "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", "sass": "^1.3.0" }, "peerDependenciesMeta": { @@ -5506,9 +5111,6 @@ "@playwright/test": { "optional": true }, - "babel-plugin-react-compiler": { - "optional": true - }, "sass": { "optional": true } @@ -6201,59 +5803,6 @@ "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", "dev": true }, - "node_modules/sharp": { - "version": "0.33.5", - "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.33.5.tgz", - "integrity": "sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==", - "hasInstallScript": true, - "license": "Apache-2.0", - "optional": true, - "dependencies": { - "color": "^4.2.3", - "detect-libc": "^2.0.3", - "semver": "^7.6.3" - }, - "engines": { - "node": "^18.17.0 || ^20.3.0 || >=21.0.0" - }, - "funding": { - "url": "https://opencollective.com/libvips" - }, - "optionalDependencies": { - "@img/sharp-darwin-arm64": "0.33.5", - "@img/sharp-darwin-x64": "0.33.5", - "@img/sharp-libvips-darwin-arm64": "1.0.4", - "@img/sharp-libvips-darwin-x64": "1.0.4", - "@img/sharp-libvips-linux-arm": "1.0.5", - "@img/sharp-libvips-linux-arm64": "1.0.4", - "@img/sharp-libvips-linux-s390x": "1.0.4", - "@img/sharp-libvips-linux-x64": "1.0.4", - "@img/sharp-libvips-linuxmusl-arm64": "1.0.4", - "@img/sharp-libvips-linuxmusl-x64": "1.0.4", - "@img/sharp-linux-arm": "0.33.5", - "@img/sharp-linux-arm64": "0.33.5", - "@img/sharp-linux-s390x": "0.33.5", - "@img/sharp-linux-x64": "0.33.5", - "@img/sharp-linuxmusl-arm64": "0.33.5", - 
"@img/sharp-linuxmusl-x64": "0.33.5", - "@img/sharp-wasm32": "0.33.5", - "@img/sharp-win32-ia32": "0.33.5", - "@img/sharp-win32-x64": "0.33.5" - } - }, - "node_modules/sharp/node_modules/semver": { - "version": "7.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", - "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", - "license": "ISC", - "optional": true, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -6287,23 +5836,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/simple-swizzle": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", - "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", - "license": "MIT", - "optional": true, - "dependencies": { - "is-arrayish": "^0.3.1" - } - }, - "node_modules/simple-swizzle/node_modules/is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==", - "license": "MIT", - "optional": true - }, "node_modules/sisteransi": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", @@ -6535,10 +6067,9 @@ } }, "node_modules/styled-jsx": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.6.tgz", - "integrity": "sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==", - "license": "MIT", + "version": "5.1.1", + "resolved": 
"https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", + "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", "dependencies": { "client-only": "0.0.1" }, @@ -6546,7 +6077,7 @@ "node": ">= 12.0.0" }, "peerDependencies": { - "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0" + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" }, "peerDependenciesMeta": { "@babel/core": { diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json index 420bca941..5a8fea816 100644 --- a/ecosystem-tests/vercel-edge/package.json +++ b/ecosystem-tests/vercel-edge/package.json @@ -15,7 +15,7 @@ }, "dependencies": { "ai": "2.1.34", - "next": "^15.2.3", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, From 3676d34127cd88a67dde6e6d24f2b6a7b65d3073 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 19:41:56 +0000 Subject: [PATCH 474/533] feat(api): add `get /responses/{response_id}/input_items` endpoint --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 1e1104a06..f6a90d243 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 9351ea829c2b41da3b48a38c934c92ee +config_hash: e25e31d8446b6bc0e3ef7103b6993cce From be00d29fadb2b78920bcae1e6e72750bc6f973a4 Mon Sep 17 00:00:00 2001 From: Wassim Chegham Date: Fri, 28 Mar 2025 21:46:46 +0100 Subject: [PATCH 475/533] perf(embedding): default embedding creation to base64 (#1312) * perf(embedding): always request embedding creation as base64 Requesting base64 encoded embeddings returns smaller body sizes, on average ~60% smaller than 
float32 encoded. In other words, the size of the response body containing embeddings in float32 is ~2.3x bigger than base64 encoded embedding. We always request embedding creating encoded as base64, and then decoded them to float32 based on the user's provided encoding_format parameter. Closes #1310 Co-authored-by: Robert Craigie --- src/core.ts | 21 +++++++++++++ src/resources/embeddings.ts | 42 ++++++++++++++++++++++++-- tests/api-resources/embeddings.test.ts | 31 +++++++++++++++++++ 3 files changed, 92 insertions(+), 2 deletions(-) diff --git a/src/core.ts b/src/core.ts index 0dedc53eb..a3f664906 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1287,6 +1287,27 @@ export const toBase64 = (str: string | null | undefined): string => { throw new OpenAIError('Cannot generate b64 string; Expected `Buffer` or `btoa` to be defined'); }; +/** + * Converts a Base64 encoded string to a Float32Array. + * @param base64Str - The Base64 encoded string. + * @returns An Array of numbers interpreted as Float32 values. 
+ */ +export const toFloat32Array = (base64Str: string): Array => { + if (typeof Buffer !== 'undefined') { + // for Node.js environment + return Array.from(new Float32Array(Buffer.from(base64Str, 'base64').buffer)); + } else { + // for legacy web platform APIs + const binaryStr = atob(base64Str); + const len = binaryStr.length; + const bytes = new Uint8Array(len); + for (let i = 0; i < len; i++) { + bytes[i] = binaryStr.charCodeAt(i); + } + return Array.from(new Float32Array(bytes.buffer)); + } +}; + export function isObj(obj: unknown): obj is Record { return obj != null && typeof obj === 'object' && !Array.isArray(obj); } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index d01ffc807..a4be9ca3c 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -9,9 +9,47 @@ export class Embeddings extends APIResource { */ create( body: EmbeddingCreateParams, - options?: Core.RequestOptions, + options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.post('/embeddings', { body, ...options }); + const hasUserProvidedEncodingFormat = !!body.encoding_format; + // No encoding_format specified, defaulting to base64 for performance reasons + // See https://github.com/openai/openai-node/pull/1312 + let encoding_format: EmbeddingCreateParams['encoding_format'] = + hasUserProvidedEncodingFormat ? 
body.encoding_format : 'base64'; + + if (hasUserProvidedEncodingFormat) { + Core.debug('Request', 'User defined encoding_format:', body.encoding_format); + } + + const response: Core.APIPromise = this._client.post('/embeddings', { + body: { + ...body, + encoding_format: encoding_format as EmbeddingCreateParams['encoding_format'], + }, + ...options, + }); + + // if the user specified an encoding_format, return the response as-is + if (hasUserProvidedEncodingFormat) { + return response; + } + + // in this stage, we are sure the user did not specify an encoding_format + // and we defaulted to base64 for performance reasons + // we are sure then that the response is base64 encoded, let's decode it + // the returned result will be a float32 array since this is OpenAI API's default encoding + Core.debug('response', 'Decoding base64 embeddings to float32 array'); + + return (response as Core.APIPromise)._thenUnwrap((response) => { + if (response && response.data) { + response.data.forEach((embeddingBase64Obj) => { + const embeddingBase64Str = embeddingBase64Obj.embedding as unknown as string; + embeddingBase64Obj.embedding = Core.toFloat32Array(embeddingBase64Str); + }); + } + + return response; + }); } } diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index 46dd1b2a3..e226ade9e 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -32,4 +32,35 @@ describe('resource embeddings', () => { user: 'user-1234', }); }); + + test('create: encoding_format=float should create float32 embeddings', async () => { + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + }); + + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + }); + + test('create: encoding_format=base64 should create float32 embeddings', async () => { 
+ const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + encoding_format: 'base64', + }); + + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + }); + + test('create: encoding_format=default should create float32 embeddings', async () => { + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + }); + + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + }); }); From ca69782d2eb83e01d56ea81637133caadddef786 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 31 Mar 2025 14:58:34 +0000 Subject: [PATCH 476/533] release: 4.91.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7b04494d6..f6df5bd5c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.90.0" + ".": "4.91.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 89523001a..8cf3201bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.91.0 (2025-03-31) + +Full Changelog: [v4.90.0...v4.91.0](https://github.com/openai/openai-node/compare/v4.90.0...v4.91.0) + +### Features + +* **api:** add `get /responses/{response_id}/input_items` endpoint ([ef0e0ac](https://github.com/openai/openai-node/commit/ef0e0acd469379ae6f2745c83e6c6813ff7b4edc)) + + +### Performance Improvements + +* **embedding:** default embedding creation to base64 ([#1312](https://github.com/openai/openai-node/issues/1312)) 
([e54530e](https://github.com/openai/openai-node/commit/e54530e4f6f00d7d74fc8636bbdb6f6280548750)), closes [#1310](https://github.com/openai/openai-node/issues/1310) + ## 4.90.0 (2025-03-27) Full Changelog: [v4.89.1...v4.90.0](https://github.com/openai/openai-node/compare/v4.89.1...v4.90.0) diff --git a/jsr.json b/jsr.json index 98c8e6959..4595ab4b7 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.90.0", + "version": "4.91.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 408e50a73..089656265 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.90.0", + "version": "4.91.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 03d899bdd..0095d88c8 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.90.0'; // x-release-please-version +export const VERSION = '4.91.0'; // x-release-please-version From 71950f6e891ba0813c25b2992db93a61ef6c9664 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 00:26:27 +0000 Subject: [PATCH 477/533] chore: Remove deprecated/unused remote spec feature --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index f6a90d243..2ccfd3411 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: e25e31d8446b6bc0e3ef7103b6993cce +config_hash: 2daae06cc598821ccf87201de0861e40 From 1847673de09586c809e1057a6b08c604471e13ff Mon Sep 17 00:00:00 2001 From: stainless-bot 
Date: Tue, 1 Apr 2025 14:36:41 -0400 Subject: [PATCH 478/533] fix(docs): correct docstring on responses.stream --- src/resources/responses/responses.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 6c9f58b43..a46c4182c 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -128,7 +128,7 @@ export class Responses extends APIResource { } /** - * Creates a chat completion stream + * Creates a model response stream */ stream>( body: Params, From e080e12cd3ab75ddc843746e7baa8fbf38f8a031 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 18:37:12 +0000 Subject: [PATCH 479/533] release: 4.91.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f6df5bd5c..0fdb6f309 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.91.0" + ".": "4.91.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8cf3201bb..0de0d9630 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.91.1 (2025-04-01) + +Full Changelog: [v4.91.0...v4.91.1](https://github.com/openai/openai-node/compare/v4.91.0...v4.91.1) + +### Bug Fixes + +* **docs:** correct docstring on responses.stream ([1c8cd6a](https://github.com/openai/openai-node/commit/1c8cd6a638128b0ff5fac89d6c7db256f0b63a85)) + + +### Chores + +* Remove deprecated/unused remote spec feature ([ce3dfa8](https://github.com/openai/openai-node/commit/ce3dfa88bd4d395debccc0e6e1aac6d218b07cb8)) + ## 4.91.0 (2025-03-31) Full Changelog: [v4.90.0...v4.91.0](https://github.com/openai/openai-node/compare/v4.90.0...v4.91.0) diff --git a/jsr.json b/jsr.json index 
4595ab4b7..9bd85f8c9 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.91.0", + "version": "4.91.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 089656265..cfa3e6201 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.91.0", + "version": "4.91.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 0095d88c8..85314d847 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.91.0'; // x-release-please-version +export const VERSION = '4.91.1'; // x-release-please-version From 32afb0022939b19069c37fcd9cabfe666ea86b77 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 23:12:23 +0000 Subject: [PATCH 480/533] feat(api): manual updates --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 2ccfd3411..71ac95541 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6663c59193eb95b201e492de17dcbd5e126ba03d18ce66287a3e2c632ca56fe7.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 2daae06cc598821ccf87201de0861e40 +config_hash: 31a12443afeef2933b34e2de23c40954 From efce6d3d719ad463b035b22e9a1cf461ab62b5af Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 23:18:37 +0000 Subject: [PATCH 481/533] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 71ac95541..baad2afc1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 31a12443afeef2933b34e2de23c40954 +config_hash: 178ba1bfb1237bf6b94abb3408072aa7 From 5e5e4607a103fcb6257c071bb4bf57902ee6415f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 13:34:13 +0000 Subject: [PATCH 482/533] fix(client): send `X-Stainless-Timeout` in seconds (#1442) --- src/core.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core.ts b/src/core.ts index a3f664906..1858a31e8 100644 --- a/src/core.ts +++ b/src/core.ts @@ -406,7 +406,7 @@ export abstract class APIClient { getHeader(headers, 'x-stainless-timeout') === undefined && options.timeout ) { - reqHeaders['x-stainless-timeout'] = String(options.timeout); + reqHeaders['x-stainless-timeout'] = String(Math.trunc(options.timeout / 1000)); } this.validateHeaders(reqHeaders, headers); From c1c281983e23dcfdca964720265d3cba28b17795 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 14:49:58 +0000 Subject: [PATCH 483/533] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index baad2afc1..675edb075 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 178ba1bfb1237bf6b94abb3408072aa7 +config_hash: 
578c5bff4208d560c0c280f13324409f From 1cb66b6ccbcecaa6e48b90d37d8cac4840bb69a4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 20:55:24 +0000 Subject: [PATCH 484/533] chore(internal): add aliases for Record and Array (#1443) --- src/core.ts | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/core.ts b/src/core.ts index 1858a31e8..ee445daf1 100644 --- a/src/core.ts +++ b/src/core.ts @@ -34,6 +34,20 @@ export { export type Fetch = (url: RequestInfo, init?: RequestInit) => Promise; +/** + * An alias to the builtin `Array` type so we can + * easily alias it in import statements if there are name clashes. + */ +type _Array = Array; + +/** + * An alias to the builtin `Record` type so we can + * easily alias it in import statements if there are name clashes. + */ +type _Record = Record; + +export type { _Array as Array, _Record as Record }; + type PromiseOrValue = T | Promise; type APIResponseProps = { From 4af79ddd5b19925fa09d9ae877470aa8304535c2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 18:22:30 +0000 Subject: [PATCH 485/533] fix(api): improve type resolution when importing as a package (#1444) --- packages/mcp-server/src/tools.ts | 1 + 1 file changed, 1 insertion(+) create mode 100644 packages/mcp-server/src/tools.ts diff --git a/packages/mcp-server/src/tools.ts b/packages/mcp-server/src/tools.ts new file mode 100644 index 000000000..7e516de7c --- /dev/null +++ b/packages/mcp-server/src/tools.ts @@ -0,0 +1 @@ +export * from './tools/index'; From b893d81420359c712dab6997c2dbc9f309549712 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 3 Apr 2025 18:37:52 +0000 Subject: [PATCH 486/533] feat(api): manual updates --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml 
index 675edb075..aebb90c8c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 82 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: 578c5bff4208d560c0c280f13324409f +config_hash: bcd2cacdcb9fae9938f273cd167f613c From 4ba994773b41a3ed05a3ad908b235fc5f3810dfc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 4 Apr 2025 21:09:16 +0000 Subject: [PATCH 487/533] fix(mcp): remove unused tools.ts (#1445) --- packages/mcp-server/src/tools.ts | 1 - 1 file changed, 1 deletion(-) delete mode 100644 packages/mcp-server/src/tools.ts diff --git a/packages/mcp-server/src/tools.ts b/packages/mcp-server/src/tools.ts deleted file mode 100644 index 7e516de7c..000000000 --- a/packages/mcp-server/src/tools.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './tools/index'; From d6b99c8fcbd35ef6b45d66f487aea759c01febbc Mon Sep 17 00:00:00 2001 From: Richard Dzenis Date: Mon, 7 Apr 2025 14:00:53 +0300 Subject: [PATCH 488/533] fix(embeddings): correctly decode base64 data (#1448) * Fix Core.toFloat32Array, Buffer version According to NodeJS docs Buffer.buffer is not guaranteed to correspond exactly to the original Buffer. [1] The previous implementation could use buffer garbage while converting bytes to floats. 
[1] https://nodejs.org/api/buffer.html#bufbuffer * add tests for embeddings data * fix formatting --------- Co-authored-by: Robert Craigie --- src/core.ts | 5 +- .../embeddings-base64-response.json | 1 + .../embeddings-float-response.json | 1 + tests/api-resources/embeddings.test.ts | 57 ++++++++++++++++--- 4 files changed, 55 insertions(+), 9 deletions(-) create mode 100644 tests/api-resources/embeddings-base64-response.json create mode 100644 tests/api-resources/embeddings-float-response.json diff --git a/src/core.ts b/src/core.ts index ee445daf1..ccc677e0e 100644 --- a/src/core.ts +++ b/src/core.ts @@ -1309,7 +1309,10 @@ export const toBase64 = (str: string | null | undefined): string => { export const toFloat32Array = (base64Str: string): Array => { if (typeof Buffer !== 'undefined') { // for Node.js environment - return Array.from(new Float32Array(Buffer.from(base64Str, 'base64').buffer)); + const buf = Buffer.from(base64Str, 'base64'); + return Array.from( + new Float32Array(buf.buffer, buf.byteOffset, buf.length / Float32Array.BYTES_PER_ELEMENT), + ); } else { // for legacy web platform APIs const binaryStr = atob(base64Str); diff --git a/tests/api-resources/embeddings-base64-response.json b/tests/api-resources/embeddings-base64-response.json new file mode 100644 index 000000000..9b0f7629c --- /dev/null +++ b/tests/api-resources/embeddings-base64-response.json @@ -0,0 +1 @@ 
+{"object":"list","data":[{"object":"embedding","index":0,"embedding":"A1fLvaC4Bb0QB7w8yEvrPOm9Xj2r0yA8EW4sPRq75j3Fbiq81/chPumAGb0afqG8R6AFvpzsQT35SPO7Hi39PEMAir1lf0A92McfvRoVlLxQv9o9tHqIvQYlrL0fwlK8sufPPYz2gjzH5Ho93GebvN+eCTxjRjW8PJRKvXMtFD4+n3C9ByMPO39Gkjs1Jm49A1fLPdNXpjv8RLm92McfveKpLz01VNO9SUIevhAHvD0flG09+9srvW5j7Txp8dY8LW4Ju08bJb1GdL29g+aNPWlLBD1p8dY8LkCkvfPLtjxcBj4+1/ehPebv/bz/Ifo8SqkOvREFHzyAr588HbUPPbFS+r00gri825WAPQlcGj1qHZ+8o8EOPo880Tn5dli9zRUSPc2APD0b5RG9mhxEvTyUSj3FQMU95u/9vE20tD3wwBC94NmxvXSUhL3Ofh8904WLPRbeJb2Paja8BClmvhwgOj2e6Ic9em0LPdj1BD3lSau7dJQEPJi107yB6kc97sTKO6lAaD2YDwE9YDuPPSFVC735dtg9SK1IOysJNrwtQkE8BmJxPb2ZXT0hVYs9g+YNvLfuuz2nyhe9z7nHN5UVWDxea5E77F1avTIbyL256oG9ft+hPVWJAbwNoug82TCtvUrm072wgN86JPWGO3TRyTwOY4a8xJwPvkx5DL1f1B68RwkTvja7Q72BrQI9Pfs6PTdfeb3RxG09jJxVvfl22D3eCbQ9FbR6vTPtYrn0mzS+kqGkPDxXhbwyG8i98M9wveayuL1EpL88lNqvve3yL70RQmQ7VcZGPaPBjr1wyEA9fKaWOskMibwNomi8J9Rku9EeGz016Si8O1mivQ38lb0EgxO88P1VvcilmLuNA0a9lj8DvHCceD3lSSs9uFWsve6HBT6XEZ68ShS5PFJSE70dTIK86OvDvSNgsbzS8DU8bPz8PAuVpTxKQIE9/NmOPBhFFj7LsL67PJRKvIxu8LwSqVS8D8yTPSOOlj1g0gG8A+69vYz2AjxPhLK80fLSPbrL/LztWz09LAcZvqfKF73B/JO8lnzIvCk5OLxwMU69dmQCvQtp3bs6hwe9WZKKume4S7x3CLg9zK4hPLsjDT16P6a7MbTXPRp+IT0dtQ89GayGvcngwD2F8bO70R4bu8tFlDxcBr67xAWdvdnWfzzQTIC9zn6fPYSKwz3alx28h8GxPW74wj3eNxk+xUBFvIpjyj0WdRi9AkoIPXhvqLugx+U8F0ezvUlCHjx3NAC9uvlhPEOmXD36oAM9D56uvddgrz2giiC9GhWUvHrWGLv0yRk8fOPbvMc+KLs7//S8v5UjPJUV2D0KLjW6YKa5PDciNDuJznQ9USZLPQ=="}],"model":"text-embedding-3-large","usage":{"prompt_tokens":1,"total_tokens":1}} \ No newline at end of file diff --git a/tests/api-resources/embeddings-float-response.json b/tests/api-resources/embeddings-float-response.json new file mode 100644 index 000000000..9b5b788e2 --- /dev/null +++ b/tests/api-resources/embeddings-float-response.json @@ -0,0 +1 @@ 
+{"object":"list","data":[{"object":"embedding","index":0,"embedding":[-0.099287055,-0.032646775,0.022952586,0.028722659,0.05438033,0.009816091,0.042097155,0.112661555,-0.010402386,0.158172,-0.037476454,-0.01971345,-0.13049422,0.04734479,-0.0074244705,0.030905303,-0.06738331,0.046996493,-0.039008945,-0.018076468,0.10681021,-0.06664029,-0.08405499,-0.012863665,0.10151614,0.015986703,0.061253335,-0.018970422,0.008399694,-0.011064145,-0.049457774,0.14470463,-0.058745615,0.0021840946,0.00446397,0.058141906,0.099287055,0.0050763874,-0.09046361,-0.039008945,0.042886622,-0.103187956,-0.15454973,0.091810346,0.058002587,-0.041957837,0.028978076,0.02623816,-0.002097021,-0.040309247,-0.09250693,0.06928732,0.03229848,0.02623816,-0.08020054,0.022314047,0.18557113,0.079086,-0.030998182,0.030533789,-0.034829415,0.009705798,0.019492865,0.035084832,-0.122228034,-0.022523023,0.06278583,0.037685428,-0.019423205,0.13941054,0.00039908706,-0.052847836,0.035665322,0.04602127,-0.035618883,-0.04787884,0.049457774,0.096314944,-0.030998182,0.08823452,-0.03534025,-0.086841345,-0.06473628,0.03893929,0.06812634,-0.040495,-0.011133804,-0.22476584,0.045440778,0.06636165,0.03403995,0.032461017,-0.005227315,0.008092035,-0.025843427,0.048807625,0.0061880266,0.05670229,0.031509012,0.06993747,-0.034016732,0.10569567,0.0030620862,-0.011110584,0.011795563,0.058931373,0.054101694,0.068033464,-0.008660915,0.091763906,-0.0370585,0.000023809172,0.013188739,0.004437848,-0.053312227,-0.09770812,-0.06343598,0.07903956,-0.007906278,0.028397584,-0.084565826,-0.103466585,0.0017051902,0.0041185785,0.024636008,-0.016404655,-0.14024645,-0.034295365,-0.009694188,-0.14359008,-0.04778596,0.031903747,0.045649756,-0.06088182,0.058049027,-0.052151248,0.10569567,0.087909445,-0.061206896,-0.00021641403,-0.17637616,0.020096574,-0.016276948,-0.09770812,-0.058792055,-0.09018497,0.023393758,-0.08586612,-0.04295628,0.0034829418,0.048528988,-0.06970527,0.047066152,0.0011493708,-0.01672973,-0.014198792,-0.0034916492,0.037871186,-0.
010309507,-0.079271756,-0.073234655,-0.0090034045,-0.052244127,-0.0046584345,-0.04834323,-0.008010766,0.060696065,0.04181852,-0.08414787,0.13040134,-0.019295497,0.022592682,-0.03596718,-0.015905434,-0.0956648,-0.021652287,0.011104779,0.030882083,0.02021267,0.0631109,0.017437927,0.14674795,-0.005819415,-0.012364443,-0.029349588,-0.012979763,0.072166555,0.07351329,-0.007923692,-0.09273913,0.007993352,-0.021791605,0.1030022,-0.030858863,0.046230245,-0.14944142,-0.0370585,-0.018064858,-0.02447347,-0.011244097,-0.050340116,-0.03183409,-0.006756907,-0.033087946,-0.001057218,-0.012434102,0.089859895,0.009868335,0.034457903,-0.005073485,0.10532416,0.0394269,0.035084832,-0.06575794,0.09417874,-0.005491438,-0.002366949,0.018099686,-0.005799098,-0.07667115,0.0156151885,-0.06264651,0.07787858,0.09547904,-0.009618724,0.086794905,0.095200405,0.14962718,-0.012039368,0.09882267,-0.037221037,0.033273704,-0.0051402412,0.02804929,-0.08753794,0.009659358,-0.031300034,0.01379245,0.053869497,0.03213594,-0.08526241,0.085633926,-0.039194703,-0.018076468,-0.0023321197,0.009386528,-0.026841871,-0.0025672184,-0.02990686,0.009984433,0.105509914,-0.00069114624,0.022662342,0.0027486214,0.05976728,0.04959709]}],"model":"text-embedding-3-large","usage":{"prompt_tokens":1,"total_tokens":1}} \ No newline at end of file diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index e226ade9e..629265643 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -2,6 +2,9 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; +import { mockFetch } from '../utils/mock-fetch'; +import fs from 'fs/promises'; +import Path from 'path'; const client = new OpenAI({ apiKey: 'My API Key', @@ -33,34 +36,72 @@ describe('resource embeddings', () => { }); }); - test('create: encoding_format=float should create float32 embeddings', async () => { + test('create: encoding_format=default should create float32 embeddings', 
async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', }); expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); - expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + expect(response.data?.at(0)?.embedding.at(0)).toBe(-0.09928705543279648); }); - test('create: encoding_format=base64 should create float32 embeddings', async () => { + test('create: encoding_format=float should create float32 embeddings', async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', - encoding_format: 'base64', + encoding_format: 'float', }); expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); - expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + expect(response.data?.at(0)?.embedding.at(0)).toBe(-0.099287055); }); - test('create: encoding_format=default should create float32 embeddings', async () => { + test('create: encoding_format=base64 should return base64 embeddings', async () => { + const client = makeClient(); const response = await client.embeddings.create({ input: 'The quick brown fox jumped over the lazy dog', model: 'text-embedding-3-small', + encoding_format: 'base64', }); - expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); - expect(Number.isFinite(response.data?.at(0)?.embedding.at(0))).toBe(true); + expect(typeof response.data?.at(0)?.embedding).toBe('string'); }); }); + +function makeClient(): OpenAI { + const { fetch, handleRequest } = mockFetch(); + + handleRequest(async (_, init) => { + const format = (JSON.parse(init!.body as string) as OpenAI.EmbeddingCreateParams).encoding_format; + return new Response( + await fs.readFile( + Path.join( + __dirname, + + // these responses were taken from the live API with: + // + // model: 
'text-embedding-3-large', + // input: 'h', + // dimensions: 256, + + format === 'base64' ? 'embeddings-base64-response.json' : 'embeddings-float-response.json', + ), + ), + { + status: 200, + headers: { + 'Content-Type': 'application/json', + }, + }, + ); + }); + + return new OpenAI({ + fetch, + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', + }); +} From 0d31f406d3c47fb2f3a2a406a53ca28279af5641 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 11:01:19 +0000 Subject: [PATCH 489/533] release: 4.92.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 24 ++++++++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 28 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 0fdb6f309..e2b30744d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.91.1" + ".": "4.92.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 0de0d9630..56fe95617 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 4.92.0 (2025-04-07) + +Full Changelog: [v4.91.1...v4.92.0](https://github.com/openai/openai-node/compare/v4.91.1...v4.92.0) + +### Features + +* **api:** manual updates ([891754d](https://github.com/openai/openai-node/commit/891754d7fa42d71ce4f93288dd043ef0b97fee23)) +* **api:** manual updates ([01e5546](https://github.com/openai/openai-node/commit/01e5546f3f48a1f4d645e09e7581f16b30f25bdd)) +* **api:** manual updates ([f38dbf3](https://github.com/openai/openai-node/commit/f38dbf3b39b0800b3bbef5c603a4fa2b616f25d8)) +* **api:** manual updates ([1f12253](https://github.com/openai/openai-node/commit/1f12253054a5a7e35dc03b17901b4c1f33bf5b3d)) + + +### Bug Fixes + +* **api:** improve type resolution when importing as a package 
([#1444](https://github.com/openai/openai-node/issues/1444)) ([4aa46d6](https://github.com/openai/openai-node/commit/4aa46d6c0da681bcdde31fcbb09e8ba6fdaf764b)) +* **client:** send `X-Stainless-Timeout` in seconds ([#1442](https://github.com/openai/openai-node/issues/1442)) ([aa4206c](https://github.com/openai/openai-node/commit/aa4206c7d93b4e3114a697f5467ffbbf5a64d1a8)) +* **embeddings:** correctly decode base64 data ([#1448](https://github.com/openai/openai-node/issues/1448)) ([58128f7](https://github.com/openai/openai-node/commit/58128f7efde73726da740c42adde7b02cdf60a6a)) +* **mcp:** remove unused tools.ts ([#1445](https://github.com/openai/openai-node/issues/1445)) ([520a8fa](https://github.com/openai/openai-node/commit/520a8fa77a69ce5855dde3481f9bd39339cb7b83)) + + +### Chores + +* **internal:** add aliases for Record and Array ([#1443](https://github.com/openai/openai-node/issues/1443)) ([b65391b](https://github.com/openai/openai-node/commit/b65391ba10d5063035c3e5c0bcc5a48ffc80f41d)) + ## 4.91.1 (2025-04-01) Full Changelog: [v4.91.0...v4.91.1](https://github.com/openai/openai-node/compare/v4.91.0...v4.91.1) diff --git a/jsr.json b/jsr.json index 9bd85f8c9..e5bac64a6 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.91.1", + "version": "4.92.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index cfa3e6201..2f79e9653 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.91.1", + "version": "4.92.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 85314d847..e2dac6b4c 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.91.1'; // x-release-please-version +export const VERSION = '4.92.0'; // x-release-please-version From 
93569f39799512604db439af20f0ef0ad3dae295 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 14:51:53 +0000 Subject: [PATCH 490/533] chore(internal): only run examples workflow in main repo (#1450) --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 627f5954f..6e59bb3fa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -68,6 +68,7 @@ jobs: examples: name: examples runs-on: ubuntu-latest + if: github.repository == 'openai/openai-node' steps: - uses: actions/checkout@v4 From 324b091e8d70c6a13e486ca87727d4bd59d9b71f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 14:52:33 +0000 Subject: [PATCH 491/533] release: 4.92.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e2b30744d..837894bfb 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.92.0" + ".": "4.92.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 56fe95617..105627c5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.92.1 (2025-04-07) + +Full Changelog: [v4.92.0...v4.92.1](https://github.com/openai/openai-node/compare/v4.92.0...v4.92.1) + +### Chores + +* **internal:** only run examples workflow in main repo ([#1450](https://github.com/openai/openai-node/issues/1450)) ([5e49a7a](https://github.com/openai/openai-node/commit/5e49a7a447bb788fa05898c15ae57c6ea9c8fd49)) + ## 4.92.0 (2025-04-07) Full Changelog: [v4.91.1...v4.92.0](https://github.com/openai/openai-node/compare/v4.91.1...v4.92.0) diff --git a/jsr.json b/jsr.json index e5bac64a6..b986198a1 100644 --- 
a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.92.0", + "version": "4.92.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 2f79e9653..58c231fda 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.92.0", + "version": "4.92.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index e2dac6b4c..bfae301de 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.92.0'; // x-release-please-version +export const VERSION = '4.92.1'; // x-release-please-version From 15a86c958bf300486907f2498e1028fc9bc50b00 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 14:25:49 +0000 Subject: [PATCH 492/533] chore(tests): improve enum examples (#1454) --- tests/api-resources/images.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 88eb97a93..43e67b030 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -28,7 +28,7 @@ describe('resource images', () => { model: 'dall-e-2', n: 1, response_format: 'url', - size: '256x256', + size: '1024x1024', user: 'user-1234', }); }); @@ -55,7 +55,7 @@ describe('resource images', () => { model: 'dall-e-2', n: 1, response_format: 'url', - size: '256x256', + size: '1024x1024', user: 'user-1234', }); }); @@ -78,7 +78,7 @@ describe('resource images', () => { n: 1, quality: 'standard', response_format: 'url', - size: '256x256', + size: '1024x1024', style: 'vivid', user: 'user-1234', }); From 33b66f517e756e63c676efee97f7122b3cf165d1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: 
Tue, 8 Apr 2025 18:49:53 +0000 Subject: [PATCH 493/533] feat(api): Add evalapi to sdk (#1456) Adding the evalsapi to the sdk. --- .stats.yml | 8 +- api.md | 72 ++ src/index.ts | 38 + src/resources/evals.ts | 3 + src/resources/evals/evals.ts | 783 ++++++++++++ src/resources/evals/index.ts | 33 + src/resources/evals/runs.ts | 3 + src/resources/evals/runs/index.ts | 23 + src/resources/evals/runs/output-items.ts | 410 +++++++ src/resources/evals/runs/runs.ts | 1058 +++++++++++++++++ src/resources/fine-tuning/checkpoints.ts | 3 + .../fine-tuning/checkpoints/checkpoints.ts | 32 + .../fine-tuning/checkpoints/index.ts | 12 + .../fine-tuning/checkpoints/permissions.ts | 198 +++ src/resources/fine-tuning/fine-tuning.ts | 6 + src/resources/fine-tuning/index.ts | 1 + src/resources/index.ts | 17 + tests/api-resources/evals/evals.test.ts | 417 +++++++ .../evals/runs/output-items.test.ts | 61 + tests/api-resources/evals/runs/runs.test.ts | 118 ++ .../checkpoints/permissions.test.ts | 85 ++ 21 files changed, 3377 insertions(+), 4 deletions(-) create mode 100644 src/resources/evals.ts create mode 100644 src/resources/evals/evals.ts create mode 100644 src/resources/evals/index.ts create mode 100644 src/resources/evals/runs.ts create mode 100644 src/resources/evals/runs/index.ts create mode 100644 src/resources/evals/runs/output-items.ts create mode 100644 src/resources/evals/runs/runs.ts create mode 100644 src/resources/fine-tuning/checkpoints.ts create mode 100644 src/resources/fine-tuning/checkpoints/checkpoints.ts create mode 100644 src/resources/fine-tuning/checkpoints/index.ts create mode 100644 src/resources/fine-tuning/checkpoints/permissions.ts create mode 100644 tests/api-resources/evals/evals.test.ts create mode 100644 tests/api-resources/evals/runs/output-items.test.ts create mode 100644 tests/api-resources/evals/runs/runs.test.ts create mode 100644 tests/api-resources/fine-tuning/checkpoints/permissions.test.ts diff --git a/.stats.yml b/.stats.yml index 
aebb90c8c..ebe07c137 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 82 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml -openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e -config_hash: bcd2cacdcb9fae9938f273cd167f613c +configured_endpoints: 97 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-472fe3036ea745365257fe870c0330917fb3153705c2826f49873cd631319b0a.yml +openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 +config_hash: ef19d36c307306f14f2e1cd5c834a151 diff --git a/api.md b/api.md index cf464cf63..2eb54b34a 100644 --- a/api.md +++ b/api.md @@ -235,6 +235,22 @@ Methods: - client.fineTuning.jobs.checkpoints.list(fineTuningJobId, { ...params }) -> FineTuningJobCheckpointsPage +## Checkpoints + +### Permissions + +Types: + +- PermissionCreateResponse +- PermissionRetrieveResponse +- PermissionDeleteResponse + +Methods: + +- client.fineTuning.checkpoints.permissions.create(fineTunedModelCheckpoint, { ...params }) -> PermissionCreateResponsesPage +- client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse +- client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint) -> PermissionDeleteResponse + # VectorStores Types: @@ -643,3 +659,59 @@ Types: Methods: - client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemsPage + +# Evals + +Types: + +- EvalCustomDataSourceConfig +- EvalLabelModelGrader +- EvalStoredCompletionsDataSourceConfig +- EvalStringCheckGrader +- EvalTextSimilarityGrader +- EvalCreateResponse +- EvalRetrieveResponse +- EvalUpdateResponse +- EvalListResponse +- EvalDeleteResponse + +Methods: + +- client.evals.create({ ...params }) -> EvalCreateResponse +- client.evals.retrieve(evalId) -> EvalRetrieveResponse +- client.evals.update(evalId, { ...params }) -> 
EvalUpdateResponse +- client.evals.list({ ...params }) -> EvalListResponsesPage +- client.evals.del(evalId) -> EvalDeleteResponse + +## Runs + +Types: + +- CreateEvalCompletionsRunDataSource +- CreateEvalJSONLRunDataSource +- EvalAPIError +- RunCreateResponse +- RunRetrieveResponse +- RunListResponse +- RunDeleteResponse +- RunCancelResponse + +Methods: + +- client.evals.runs.create(evalId, { ...params }) -> RunCreateResponse +- client.evals.runs.retrieve(evalId, runId) -> RunRetrieveResponse +- client.evals.runs.list(evalId, { ...params }) -> RunListResponsesPage +- client.evals.runs.del(evalId, runId) -> RunDeleteResponse +- client.evals.runs.cancel(evalId, runId) -> RunCancelResponse + +### OutputItems + +Types: + +- OutputItemRetrieveResponse +- OutputItemListResponse + +Methods: + +- client.evals.runs.outputItems.retrieve(evalId, runId, outputItemId) -> OutputItemRetrieveResponse +- client.evals.runs.outputItems.list(evalId, runId, { ...params }) -> OutputItemListResponsesPage diff --git a/src/index.ts b/src/index.ts index 931894f2f..9e8d7ce37 100644 --- a/src/index.ts +++ b/src/index.ts @@ -66,6 +66,23 @@ import { import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; import { Chat } from './resources/chat/chat'; +import { + EvalCreateParams, + EvalCreateResponse, + EvalCustomDataSourceConfig, + EvalDeleteResponse, + EvalLabelModelGrader, + EvalListParams, + EvalListResponse, + EvalListResponsesPage, + EvalRetrieveResponse, + EvalStoredCompletionsDataSourceConfig, + EvalStringCheckGrader, + EvalTextSimilarityGrader, + EvalUpdateParams, + EvalUpdateResponse, + Evals, +} from './resources/evals/evals'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; import { Responses } from './resources/responses/responses'; import { @@ -293,6 +310,7 @@ export class OpenAI extends Core.APIClient { batches: API.Batches = new API.Batches(this); uploads: API.Uploads = new API.Uploads(this); 
responses: API.Responses = new API.Responses(this); + evals: API.Evals = new API.Evals(this); protected override defaultQuery(): Core.DefaultQuery | undefined { return this._options.defaultQuery; @@ -356,6 +374,8 @@ OpenAI.Batches = Batches; OpenAI.BatchesPage = BatchesPage; OpenAI.Uploads = UploadsAPIUploads; OpenAI.Responses = Responses; +OpenAI.Evals = Evals; +OpenAI.EvalListResponsesPage = EvalListResponsesPage; export declare namespace OpenAI { export type RequestOptions = Core.RequestOptions; @@ -508,6 +528,24 @@ export declare namespace OpenAI { export { Responses as Responses }; + export { + Evals as Evals, + type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLabelModelGrader as EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader as EvalStringCheckGrader, + type EvalTextSimilarityGrader as EvalTextSimilarityGrader, + type EvalCreateResponse as EvalCreateResponse, + type EvalRetrieveResponse as EvalRetrieveResponse, + type EvalUpdateResponse as EvalUpdateResponse, + type EvalListResponse as EvalListResponse, + type EvalDeleteResponse as EvalDeleteResponse, + EvalListResponsesPage as EvalListResponsesPage, + type EvalCreateParams as EvalCreateParams, + type EvalUpdateParams as EvalUpdateParams, + type EvalListParams as EvalListParams, + }; + export type AllModels = API.AllModels; export type ChatModel = API.ChatModel; export type ComparisonFilter = API.ComparisonFilter; diff --git a/src/resources/evals.ts b/src/resources/evals.ts new file mode 100644 index 000000000..b611710e1 --- /dev/null +++ b/src/resources/evals.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export * from './evals/index'; diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts new file mode 100644 index 000000000..84ff6d1bb --- /dev/null +++ b/src/resources/evals/evals.ts @@ -0,0 +1,783 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as Shared from '../shared'; +import * as RunsAPI from './runs/runs'; +import { + CreateEvalCompletionsRunDataSource, + CreateEvalJSONLRunDataSource, + EvalAPIError, + RunCancelResponse, + RunCreateParams, + RunCreateResponse, + RunDeleteResponse, + RunListParams, + RunListResponse, + RunListResponsesPage, + RunRetrieveResponse, + Runs, +} from './runs/runs'; +import { CursorPage, type CursorPageParams } from '../../pagination'; + +export class Evals extends APIResource { + runs: RunsAPI.Runs = new RunsAPI.Runs(this._client); + + /** + * Create the structure of an evaluation that can be used to test a model's + * performance. An evaluation is a set of testing criteria and a datasource. After + * creating an evaluation, you can run it on different models and model parameters. + * We support several types of graders and datasources. For more information, see + * the [Evals guide](https://platform.openai.com/docs/guides/evals). + */ + create(body: EvalCreateParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/evals', { body, ...options }); + } + + /** + * Get an evaluation by ID. + */ + retrieve(evalId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.get(`/evals/${evalId}`, options); + } + + /** + * Update certain properties of an evaluation. 
+ */ + update( + evalId: string, + body: EvalUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/evals/${evalId}`, { body, ...options }); + } + + /** + * List evaluations for a project. + */ + list( + query?: EvalListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list(options?: Core.RequestOptions): Core.PagePromise; + list( + query: EvalListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.getAPIList('/evals', EvalListResponsesPage, { query, ...options }); + } + + /** + * Delete an evaluation. + */ + del(evalId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/evals/${evalId}`, options); + } +} + +export class EvalListResponsesPage extends CursorPage {} + +/** + * A CustomDataSourceConfig which specifies the schema of your `item` and + * optionally `sample` namespaces. The response schema defines the shape of the + * data that will be: + * + * - Used to define your testing criteria and + * - What data is required when creating a run + */ +export interface EvalCustomDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `custom`. + */ + type: 'custom'; +} + +/** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. + */ +export interface EvalLabelModelGrader { + input: Array; + + /** + * The labels to assign to each item in the evaluation. + */ + labels: Array; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. 
+ */ + passing_labels: Array; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; +} + +export namespace EvalLabelModelGrader { + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface Assistant { + content: Assistant.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace Assistant { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A StoredCompletionsDataSourceConfig which specifies the metadata property of + * your stored completions query. This is usually metadata like `usecase=chatbot` + * or `prompt-version=v2`, etc. The schema returned by this data source config is + * used to defined what variables are available in your evals. `item` and `sample` + * are both defined when using this data source config. + */ +export interface EvalStoredCompletionsDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `stored_completions`. + */ + type: 'stored_completions'; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; +} + +/** + * A StringCheckGrader object that performs a string comparison between input and + * reference using a specified operation. + */ +export interface EvalStringCheckGrader { + /** + * The input text. This may include template strings. + */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + */ + operation: 'eq' | 'ne' | 'like' | 'ilike'; + + /** + * The reference text. This may include template strings. + */ + reference: string; + + /** + * The object type, which is always `string_check`. + */ + type: 'string_check'; +} + +/** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ +export interface EvalTextSimilarityGrader { + /** + * The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, + * `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + */ + evaluation_metric: + | 'fuzzy_match' + | 'bleu' + | 'gleu' + | 'meteor' + | 'rouge_1' + | 'rouge_2' + | 'rouge_3' + | 'rouge_4' + | 'rouge_5' + | 'rouge_l' + | 'cosine'; + + /** + * The text being graded. + */ + input: string; + + /** + * A float score where a value greater than or equal indicates a passing grade. + */ + pass_threshold: number; + + /** + * The text being graded against. + */ + reference: string; + + /** + * The type of grader. + */ + type: 'text_similarity'; + + /** + * The name of the grader. + */ + name?: string; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. 
Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalCreateResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalRetrieveResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalUpdateResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. 
+ */ + testing_criteria: Array; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o3-mini is better at my usecase than gpt-4o + */ +export interface EvalListResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai: boolean; + + /** + * A list of testing criteria. + */ + testing_criteria: Array; +} + +export interface EvalDeleteResponse { + deleted: boolean; + + eval_id: string; + + object: string; +} + +export interface EvalCreateParams { + /** + * The configuration for the data source used for the evaluation runs. + */ + data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; + + /** + * A list of graders for all eval runs in this group. + */ + testing_criteria: Array; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name?: string; + + /** + * Indicates whether the evaluation is shared with OpenAI. + */ + share_with_openai?: boolean; +} + +export namespace EvalCreateParams { + /** + * A CustomDataSourceConfig object that defines the schema for the data source used + * for the evaluation runs. This schema is used to define the shape of the data + * that will be: + * + * - Used to define your testing criteria and + * - What data is required when creating a run + */ + export interface Custom { + /** + * The json schema for the run data source items. + */ + item_schema: Record; + + /** + * The type of data source. Always `custom`. + */ + type: 'custom'; + + /** + * Whether to include the sample schema in the data source. + */ + include_sample_schema?: boolean; + } + + /** + * A data source config which specifies the metadata property of your stored + * completions query. This is usually metadata like `usecase=chatbot` or + * `prompt-version=v2`, etc. + */ + export interface StoredCompletions { + /** + * The type of data source. Always `stored_completions`. + */ + type: 'stored_completions'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + + /** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. 
+ */ + export interface LabelModel { + input: Array; + + /** + * The labels to classify to each item in the evaluation. + */ + labels: Array; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. + */ + passing_labels: Array; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; + } + + export namespace LabelModel { + export interface SimpleInputMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface OutputMessage { + content: OutputMessage.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace OutputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } + } +} + +export interface EvalUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. 
+ * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * Rename the evaluation. + */ + name?: string; +} + +export interface EvalListParams extends CursorPageParams { + /** + * Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + * descending order. + */ + order?: 'asc' | 'desc'; + + /** + * Evals can be ordered by creation time or last updated time. Use `created_at` for + * creation time or `updated_at` for last updated time. + */ + order_by?: 'created_at' | 'updated_at'; +} + +Evals.EvalListResponsesPage = EvalListResponsesPage; +Evals.Runs = Runs; +Evals.RunListResponsesPage = RunListResponsesPage; + +export declare namespace Evals { + export { + type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLabelModelGrader as EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader as EvalStringCheckGrader, + type EvalTextSimilarityGrader as EvalTextSimilarityGrader, + type EvalCreateResponse as EvalCreateResponse, + type EvalRetrieveResponse as EvalRetrieveResponse, + type EvalUpdateResponse as EvalUpdateResponse, + type EvalListResponse as EvalListResponse, + type EvalDeleteResponse as EvalDeleteResponse, + EvalListResponsesPage as EvalListResponsesPage, + type EvalCreateParams as EvalCreateParams, + type EvalUpdateParams as EvalUpdateParams, + type EvalListParams as EvalListParams, + }; + + export { + Runs as Runs, + type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type EvalAPIError as EvalAPIError, + type RunCreateResponse as RunCreateResponse, + type RunRetrieveResponse as RunRetrieveResponse, + type RunListResponse as RunListResponse, + type RunDeleteResponse as RunDeleteResponse, + type RunCancelResponse as 
RunCancelResponse, + RunListResponsesPage as RunListResponsesPage, + type RunCreateParams as RunCreateParams, + type RunListParams as RunListParams, + }; +} diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts new file mode 100644 index 000000000..a246fe4e7 --- /dev/null +++ b/src/resources/evals/index.ts @@ -0,0 +1,33 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + EvalListResponsesPage, + Evals, + type EvalCustomDataSourceConfig, + type EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader, + type EvalTextSimilarityGrader, + type EvalCreateResponse, + type EvalRetrieveResponse, + type EvalUpdateResponse, + type EvalListResponse, + type EvalDeleteResponse, + type EvalCreateParams, + type EvalUpdateParams, + type EvalListParams, +} from './evals'; +export { + RunListResponsesPage, + Runs, + type CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource, + type EvalAPIError, + type RunCreateResponse, + type RunRetrieveResponse, + type RunListResponse, + type RunDeleteResponse, + type RunCancelResponse, + type RunCreateParams, + type RunListParams, +} from './runs/index'; diff --git a/src/resources/evals/runs.ts b/src/resources/evals/runs.ts new file mode 100644 index 000000000..a3cc2bc7f --- /dev/null +++ b/src/resources/evals/runs.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './runs/index'; diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts new file mode 100644 index 000000000..d0e18bff4 --- /dev/null +++ b/src/resources/evals/runs/index.ts @@ -0,0 +1,23 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + OutputItemListResponsesPage, + OutputItems, + type OutputItemRetrieveResponse, + type OutputItemListResponse, + type OutputItemListParams, +} from './output-items'; +export { + RunListResponsesPage, + Runs, + type CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource, + type EvalAPIError, + type RunCreateResponse, + type RunRetrieveResponse, + type RunListResponse, + type RunDeleteResponse, + type RunCancelResponse, + type RunCreateParams, + type RunListParams, +} from './runs'; diff --git a/src/resources/evals/runs/output-items.ts b/src/resources/evals/runs/output-items.ts new file mode 100644 index 000000000..ee947c60f --- /dev/null +++ b/src/resources/evals/runs/output-items.ts @@ -0,0 +1,410 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import * as RunsAPI from './runs'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; + +export class OutputItems extends APIResource { + /** + * Get an evaluation run output item by ID. + */ + retrieve( + evalId: string, + runId: string, + outputItemId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/evals/${evalId}/runs/${runId}/output_items/${outputItemId}`, options); + } + + /** + * Get a list of output items for an evaluation run. 
+ */ + list( + evalId: string, + runId: string, + query?: OutputItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + evalId: string, + runId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + evalId: string, + runId: string, + query: OutputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(evalId, runId, {}, query); + } + return this._client.getAPIList( + `/evals/${evalId}/runs/${runId}/output_items`, + OutputItemListResponsesPage, + { query, ...options }, + ); + } +} + +export class OutputItemListResponsesPage extends CursorPage {} + +/** + * A schema representing an evaluation run output item. + */ +export interface OutputItemRetrieveResponse { + /** + * Unique identifier for the evaluation run output item. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Details of the input data source item. + */ + datasource_item: Record; + + /** + * The identifier for the data source item. + */ + datasource_item_id: number; + + /** + * The identifier of the evaluation group. + */ + eval_id: string; + + /** + * The type of the object. Always "eval.run.output_item". + */ + object: 'eval.run.output_item'; + + /** + * A list of results from the evaluation run. + */ + results: Array>; + + /** + * The identifier of the evaluation run associated with this output item. + */ + run_id: string; + + /** + * A sample containing the input and output of the evaluation run. + */ + sample: OutputItemRetrieveResponse.Sample; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace OutputItemRetrieveResponse { + /** + * A sample containing the input and output of the evaluation run. + */ + export interface Sample { + /** + * An object representing an error response from the Eval API. 
+ */ + error: RunsAPI.EvalAPIError; + + /** + * The reason why the sample generation was finished. + */ + finish_reason: string; + + /** + * An array of input messages. + */ + input: Array; + + /** + * The maximum number of tokens allowed for completion. + */ + max_completion_tokens: number; + + /** + * The model used for generating the sample. + */ + model: string; + + /** + * An array of output messages. + */ + output: Array; + + /** + * The seed used for generating the sample. + */ + seed: number; + + /** + * The sampling temperature used. + */ + temperature: number; + + /** + * The top_p value used for sampling. + */ + top_p: number; + + /** + * Token usage details for the sample. + */ + usage: Sample.Usage; + } + + export namespace Sample { + /** + * An input message. + */ + export interface Input { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message sender (e.g., system, user, developer). + */ + role: string; + } + + export interface Output { + /** + * The content of the message. + */ + content?: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role?: string; + } + + /** + * Token usage details for the sample. + */ + export interface Usage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + } +} + +/** + * A schema representing an evaluation run output item. + */ +export interface OutputItemListResponse { + /** + * Unique identifier for the evaluation run output item. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Details of the input data source item. 
+ */ + datasource_item: Record; + + /** + * The identifier for the data source item. + */ + datasource_item_id: number; + + /** + * The identifier of the evaluation group. + */ + eval_id: string; + + /** + * The type of the object. Always "eval.run.output_item". + */ + object: 'eval.run.output_item'; + + /** + * A list of results from the evaluation run. + */ + results: Array>; + + /** + * The identifier of the evaluation run associated with this output item. + */ + run_id: string; + + /** + * A sample containing the input and output of the evaluation run. + */ + sample: OutputItemListResponse.Sample; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace OutputItemListResponse { + /** + * A sample containing the input and output of the evaluation run. + */ + export interface Sample { + /** + * An object representing an error response from the Eval API. + */ + error: RunsAPI.EvalAPIError; + + /** + * The reason why the sample generation was finished. + */ + finish_reason: string; + + /** + * An array of input messages. + */ + input: Array; + + /** + * The maximum number of tokens allowed for completion. + */ + max_completion_tokens: number; + + /** + * The model used for generating the sample. + */ + model: string; + + /** + * An array of output messages. + */ + output: Array; + + /** + * The seed used for generating the sample. + */ + seed: number; + + /** + * The sampling temperature used. + */ + temperature: number; + + /** + * The top_p value used for sampling. + */ + top_p: number; + + /** + * Token usage details for the sample. + */ + usage: Sample.Usage; + } + + export namespace Sample { + /** + * An input message. + */ + export interface Input { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message sender (e.g., system, user, developer). + */ + role: string; + } + + export interface Output { + /** + * The content of the message. 
+ */ + content?: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role?: string; + } + + /** + * Token usage details for the sample. + */ + export interface Usage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + } +} + +export interface OutputItemListParams extends CursorPageParams { + /** + * Sort order for output items by timestamp. Use `asc` for ascending order or + * `desc` for descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; + + /** + * Filter output items by status. Use `failed` to filter by failed output items or + * `pass` to filter by passed output items. + */ + status?: 'fail' | 'pass'; +} + +OutputItems.OutputItemListResponsesPage = OutputItemListResponsesPage; + +export declare namespace OutputItems { + export { + type OutputItemRetrieveResponse as OutputItemRetrieveResponse, + type OutputItemListResponse as OutputItemListResponse, + OutputItemListResponsesPage as OutputItemListResponsesPage, + type OutputItemListParams as OutputItemListParams, + }; +} diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts new file mode 100644 index 000000000..ca2b7f424 --- /dev/null +++ b/src/resources/evals/runs/runs.ts @@ -0,0 +1,1058 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import * as Shared from '../../shared'; +import * as OutputItemsAPI from './output-items'; +import { + OutputItemListParams, + OutputItemListResponse, + OutputItemListResponsesPage, + OutputItemRetrieveResponse, + OutputItems, +} from './output-items'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; + +export class Runs extends APIResource { + outputItems: OutputItemsAPI.OutputItems = new OutputItemsAPI.OutputItems(this._client); + + /** + * Create a new evaluation run. This is the endpoint that will kick off grading. + */ + create( + evalId: string, + body: RunCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/evals/${evalId}/runs`, { body, ...options }); + } + + /** + * Get an evaluation run by ID. + */ + retrieve( + evalId: string, + runId: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.get(`/evals/${evalId}/runs/${runId}`, options); + } + + /** + * Get a list of runs for an evaluation. + */ + list( + evalId: string, + query?: RunListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + evalId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + evalId: string, + query: RunListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(evalId, {}, query); + } + return this._client.getAPIList(`/evals/${evalId}/runs`, RunListResponsesPage, { query, ...options }); + } + + /** + * Delete an eval run. + */ + del(evalId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/evals/${evalId}/runs/${runId}`, options); + } + + /** + * Cancel an ongoing evaluation run. 
+ */ + cancel(evalId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/evals/${evalId}/runs/${runId}`, options); + } +} + +export class RunListResponsesPage extends CursorPage {} + +/** + * A CompletionsRunDataSource object describing a model sampling configuration. + */ +export interface CreateEvalCompletionsRunDataSource { + input_messages: + | CreateEvalCompletionsRunDataSource.Template + | CreateEvalCompletionsRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model: string; + + /** + * A StoredCompletionsRunDataSource configuration describing a set of filters + */ + source: + | CreateEvalCompletionsRunDataSource.FileContent + | CreateEvalCompletionsRunDataSource.FileID + | CreateEvalCompletionsRunDataSource.StoredCompletions; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + sampling_params?: CreateEvalCompletionsRunDataSource.SamplingParams; +} + +export namespace CreateEvalCompletionsRunDataSource { + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + export interface InputMessage { + content: InputMessage.Content; + + /** + * The role of the message. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace InputMessage { + export interface Content { + /** + * The text content. 
+ */ + text: string; + + /** + * The type of content, which is always `input_text`. + */ + type: 'input_text'; + } + } + + export interface OutputMessage { + content: OutputMessage.Content; + + /** + * The role of the message. Must be `assistant` for output. + */ + role: 'assistant'; + + /** + * The type of item, which is always `message`. + */ + type: 'message'; + } + + export namespace OutputMessage { + export interface Content { + /** + * The text content. + */ + text: string; + + /** + * The type of content, which is always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A StoredCompletionsRunDataSource configuration describing a set of filters + */ + export interface StoredCompletions { + /** + * An optional Unix timestamp to filter items created after this time. + */ + created_after: number | null; + + /** + * An optional Unix timestamp to filter items created before this time. + */ + created_before: number | null; + + /** + * An optional maximum number of items to return. + */ + limit: number | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. 
+ * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * An optional model to filter by (e.g., 'gpt-4o'). + */ + model: string | null; + + /** + * The type of source. Always `stored_completions`. + */ + type: 'stored_completions'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } +} + +/** + * A JsonlRunDataSource object with that specifies a JSONL file that matches the + * eval + */ +export interface CreateEvalJSONLRunDataSource { + source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; + + /** + * The type of data source. Always `jsonl`. + */ + type: 'jsonl'; +} + +export namespace CreateEvalJSONLRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } +} + +/** + * An object representing an error response from the Eval API. + */ +export interface EvalAPIError { + /** + * The error code. + */ + code: string; + + /** + * The error message. + */ + message: string; +} + +/** + * A schema representing an evaluation run. 
+ */ +export interface RunCreateResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCreateResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCreateResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. 
+ */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +/** + * A schema representing an evaluation run. + */ +export interface RunRetrieveResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
+ */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunRetrieveResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunRetrieveResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. 
+ */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +/** + * A schema representing an evaluation run. + */ +export interface RunListResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunListResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunListResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. 
+ */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunDeleteResponse { + deleted?: boolean; + + object?: string; + + run_id?: string; +} + +/** + * A schema representing an evaluation run. + */ +export interface RunCancelResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCancelResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCancelResponse { + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. 
+ */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunCreateParams { + /** + * Details about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The name of the run. + */ + name?: string; +} + +export interface RunListParams extends CursorPageParams { + /** + * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + * descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; + + /** + * Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" | + * "canceled". 
+ */ + status?: 'queued' | 'in_progress' | 'completed' | 'canceled' | 'failed'; +} + +Runs.RunListResponsesPage = RunListResponsesPage; +Runs.OutputItems = OutputItems; +Runs.OutputItemListResponsesPage = OutputItemListResponsesPage; + +export declare namespace Runs { + export { + type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type EvalAPIError as EvalAPIError, + type RunCreateResponse as RunCreateResponse, + type RunRetrieveResponse as RunRetrieveResponse, + type RunListResponse as RunListResponse, + type RunDeleteResponse as RunDeleteResponse, + type RunCancelResponse as RunCancelResponse, + RunListResponsesPage as RunListResponsesPage, + type RunCreateParams as RunCreateParams, + type RunListParams as RunListParams, + }; + + export { + OutputItems as OutputItems, + type OutputItemRetrieveResponse as OutputItemRetrieveResponse, + type OutputItemListResponse as OutputItemListResponse, + OutputItemListResponsesPage as OutputItemListResponsesPage, + type OutputItemListParams as OutputItemListParams, + }; +} diff --git a/src/resources/fine-tuning/checkpoints.ts b/src/resources/fine-tuning/checkpoints.ts new file mode 100644 index 000000000..eb09063f6 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './checkpoints/index'; diff --git a/src/resources/fine-tuning/checkpoints/checkpoints.ts b/src/resources/fine-tuning/checkpoints/checkpoints.ts new file mode 100644 index 000000000..08422aa64 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/checkpoints.ts @@ -0,0 +1,32 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../../resource'; +import * as PermissionsAPI from './permissions'; +import { + PermissionCreateParams, + PermissionCreateResponse, + PermissionCreateResponsesPage, + PermissionDeleteResponse, + PermissionRetrieveParams, + PermissionRetrieveResponse, + Permissions, +} from './permissions'; + +export class Checkpoints extends APIResource { + permissions: PermissionsAPI.Permissions = new PermissionsAPI.Permissions(this._client); +} + +Checkpoints.Permissions = Permissions; +Checkpoints.PermissionCreateResponsesPage = PermissionCreateResponsesPage; + +export declare namespace Checkpoints { + export { + Permissions as Permissions, + type PermissionCreateResponse as PermissionCreateResponse, + type PermissionRetrieveResponse as PermissionRetrieveResponse, + type PermissionDeleteResponse as PermissionDeleteResponse, + PermissionCreateResponsesPage as PermissionCreateResponsesPage, + type PermissionCreateParams as PermissionCreateParams, + type PermissionRetrieveParams as PermissionRetrieveParams, + }; +} diff --git a/src/resources/fine-tuning/checkpoints/index.ts b/src/resources/fine-tuning/checkpoints/index.ts new file mode 100644 index 000000000..51d1af9cf --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/index.ts @@ -0,0 +1,12 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Checkpoints } from './checkpoints'; +export { + PermissionCreateResponsesPage, + Permissions, + type PermissionCreateResponse, + type PermissionRetrieveResponse, + type PermissionDeleteResponse, + type PermissionCreateParams, + type PermissionRetrieveParams, +} from './permissions'; diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts new file mode 100644 index 000000000..500c3de81 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -0,0 +1,198 @@ +// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import { Page } from '../../../pagination'; + +export class Permissions extends APIResource { + /** + * **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). + * + * This enables organization owners to share fine-tuned models with other projects + * in their organization. + */ + create( + fineTunedModelCheckpoint: string, + body: PermissionCreateParams, + options?: Core.RequestOptions, + ): Core.PagePromise { + return this._client.getAPIList( + `/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, + PermissionCreateResponsesPage, + { body, method: 'post', ...options }, + ); + } + + /** + * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + * + * Organization owners can use this endpoint to view all permissions for a + * fine-tuned model checkpoint. + */ + retrieve( + fineTunedModelCheckpoint: string, + query?: PermissionRetrieveParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve( + fineTunedModelCheckpoint: string, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve( + fineTunedModelCheckpoint: string, + query: PermissionRetrieveParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.retrieve(fineTunedModelCheckpoint, {}, query); + } + return this._client.get(`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, { + query, + ...options, + }); + } + + /** + * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + * + * Organization owners can use this endpoint to delete a permission for a + * fine-tuned model checkpoint. 
+ */ + del( + fineTunedModelCheckpoint: string, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.delete(`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, options); + } +} + +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class PermissionCreateResponsesPage extends Page {} + +/** + * The `checkpoint.permission` object represents a permission for a fine-tuned + * model checkpoint. + */ +export interface PermissionCreateResponse { + /** + * The permission identifier, which can be referenced in the API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the permission was created. + */ + created_at: number; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; + + /** + * The project identifier that the permission is for. + */ + project_id: string; +} + +export interface PermissionRetrieveResponse { + data: Array; + + has_more: boolean; + + object: 'list'; + + first_id?: string | null; + + last_id?: string | null; +} + +export namespace PermissionRetrieveResponse { + /** + * The `checkpoint.permission` object represents a permission for a fine-tuned + * model checkpoint. + */ + export interface Data { + /** + * The permission identifier, which can be referenced in the API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the permission was created. + */ + created_at: number; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; + + /** + * The project identifier that the permission is for. + */ + project_id: string; + } +} + +export interface PermissionDeleteResponse { + /** + * The ID of the fine-tuned model checkpoint permission that was deleted. + */ + id: string; + + /** + * Whether the fine-tuned model checkpoint permission was successfully deleted. 
+ */ + deleted: boolean; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; +} + +export interface PermissionCreateParams { + /** + * The project identifiers to grant access to. + */ + project_ids: Array; +} + +export interface PermissionRetrieveParams { + /** + * Identifier for the last permission ID from the previous pagination request. + */ + after?: string; + + /** + * Number of permissions to retrieve. + */ + limit?: number; + + /** + * The order in which to retrieve permissions. + */ + order?: 'ascending' | 'descending'; + + /** + * The ID of the project to get permissions for. + */ + project_id?: string; +} + +Permissions.PermissionCreateResponsesPage = PermissionCreateResponsesPage; + +export declare namespace Permissions { + export { + type PermissionCreateResponse as PermissionCreateResponse, + type PermissionRetrieveResponse as PermissionRetrieveResponse, + type PermissionDeleteResponse as PermissionDeleteResponse, + PermissionCreateResponsesPage as PermissionCreateResponsesPage, + type PermissionCreateParams as PermissionCreateParams, + type PermissionRetrieveParams as PermissionRetrieveParams, + }; +} diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index df013c8ec..9b0a01992 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,6 +1,8 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; +import * as CheckpointsAPI from './checkpoints/checkpoints'; +import { Checkpoints } from './checkpoints/checkpoints'; import * as JobsAPI from './jobs/jobs'; import { FineTuningJob, @@ -18,11 +20,13 @@ import { export class FineTuning extends APIResource { jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); + checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client); } FineTuning.Jobs = Jobs; FineTuning.FineTuningJobsPage = FineTuningJobsPage; FineTuning.FineTuningJobEventsPage = FineTuningJobEventsPage; +FineTuning.Checkpoints = Checkpoints; export declare namespace FineTuning { export { @@ -38,4 +42,6 @@ export declare namespace FineTuning { type JobListParams as JobListParams, type JobListEventsParams as JobListEventsParams, }; + + export { Checkpoints as Checkpoints }; } diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index 4954406b8..d23161c62 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -1,5 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export { Checkpoints } from './checkpoints/index'; export { FineTuning } from './fine-tuning'; export { FineTuningJobsPage, diff --git a/src/resources/index.ts b/src/resources/index.ts index 04c2c887b..0d8ec9220 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -29,6 +29,23 @@ export { type EmbeddingModel, type EmbeddingCreateParams, } from './embeddings'; +export { + EvalListResponsesPage, + Evals, + type EvalCustomDataSourceConfig, + type EvalLabelModelGrader, + type EvalStoredCompletionsDataSourceConfig, + type EvalStringCheckGrader, + type EvalTextSimilarityGrader, + type EvalCreateResponse, + type EvalRetrieveResponse, + type EvalUpdateResponse, + type EvalListResponse, + type EvalDeleteResponse, + type EvalCreateParams, + type EvalUpdateParams, + type EvalListParams, +} from './evals/evals'; export { FileObjectsPage, Files, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts new file mode 100644 index 000000000..3aeb3e15c --- /dev/null +++ b/tests/api-resources/evals/evals.test.ts @@ -0,0 +1,417 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource evals', () => { + test('create: only required params', async () => { + const responsePromise = client.evals.create({ + data_source_config: { + item_schema: { + '0': 'bar', + '1': 'bar', + '2': 'bar', + '3': 'bar', + '4': 'bar', + '5': 'bar', + '6': 'bar', + '7': 'bar', + '8': 'bar', + '9': 'bar', + '10': 'bar', + '11': 'bar', + '12': 'bar', + '13': 'bar', + '14': 'bar', + '15': 'bar', + '16': 'bar', + '17': 'bar', + '18': 'bar', + '19': 'bar', + '20': 'bar', + '21': 'bar', + '22': 'bar', + '23': 'bar', + '24': 'bar', + '25': 'bar', + '26': 'bar', + '27': 'bar', + '28': 'bar', + '29': 'bar', + '30': 'bar', + '31': 'bar', + '32': 'bar', + '33': 'bar', + '34': 'bar', + '35': 'bar', + '36': 'bar', + '37': 'bar', + '38': 'bar', + '39': 'bar', + '40': 'bar', + '41': 'bar', + '42': 'bar', + '43': 'bar', + '44': 'bar', + '45': 'bar', + '46': 'bar', + '47': 'bar', + '48': 'bar', + '49': 'bar', + '50': 'bar', + '51': 'bar', + '52': 'bar', + '53': 'bar', + '54': 'bar', + '55': 'bar', + '56': 'bar', + '57': 'bar', + '58': 'bar', + '59': 'bar', + '60': 'bar', + '61': 'bar', + '62': 'bar', + '63': 'bar', + '64': 'bar', + '65': 'bar', + '66': 'bar', + '67': 'bar', + '68': 'bar', + '69': 'bar', + '70': 'bar', + '71': 'bar', + '72': 'bar', + '73': 'bar', + '74': 'bar', + '75': 'bar', + '76': 'bar', + '77': 'bar', + '78': 'bar', + '79': 'bar', + '80': 'bar', + '81': 'bar', + '82': 'bar', + '83': 'bar', + '84': 'bar', + '85': 'bar', + '86': 'bar', + '87': 'bar', + '88': 'bar', + '89': 'bar', + '90': 'bar', + '91': 'bar', + '92': 'bar', + '93': 'bar', + '94': 'bar', + '95': 'bar', + '96': 'bar', + '97': 'bar', + '98': 'bar', + '99': 'bar', + '100': 'bar', + '101': 'bar', + '102': 'bar', + '103': 'bar', + '104': 'bar', + '105': 'bar', + '106': 'bar', + '107': 'bar', + '108': 'bar', + '109': 'bar', + '110': 'bar', + '111': 'bar', + '112': 'bar', + '113': 'bar', + '114': 'bar', + '115': 'bar', + '116': 'bar', + '117': 'bar', + 
'118': 'bar', + '119': 'bar', + '120': 'bar', + '121': 'bar', + '122': 'bar', + '123': 'bar', + '124': 'bar', + '125': 'bar', + '126': 'bar', + '127': 'bar', + '128': 'bar', + '129': 'bar', + '130': 'bar', + '131': 'bar', + '132': 'bar', + '133': 'bar', + '134': 'bar', + '135': 'bar', + '136': 'bar', + '137': 'bar', + '138': 'bar', + '139': 'bar', + }, + type: 'custom', + }, + testing_criteria: [ + { + input: [{ content: 'content', role: 'role' }], + labels: ['string'], + model: 'model', + name: 'name', + passing_labels: ['string'], + type: 'label_model', + }, + ], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.evals.create({ + data_source_config: { + item_schema: { + '0': 'bar', + '1': 'bar', + '2': 'bar', + '3': 'bar', + '4': 'bar', + '5': 'bar', + '6': 'bar', + '7': 'bar', + '8': 'bar', + '9': 'bar', + '10': 'bar', + '11': 'bar', + '12': 'bar', + '13': 'bar', + '14': 'bar', + '15': 'bar', + '16': 'bar', + '17': 'bar', + '18': 'bar', + '19': 'bar', + '20': 'bar', + '21': 'bar', + '22': 'bar', + '23': 'bar', + '24': 'bar', + '25': 'bar', + '26': 'bar', + '27': 'bar', + '28': 'bar', + '29': 'bar', + '30': 'bar', + '31': 'bar', + '32': 'bar', + '33': 'bar', + '34': 'bar', + '35': 'bar', + '36': 'bar', + '37': 'bar', + '38': 'bar', + '39': 'bar', + '40': 'bar', + '41': 'bar', + '42': 'bar', + '43': 'bar', + '44': 'bar', + '45': 'bar', + '46': 'bar', + '47': 'bar', + '48': 'bar', + '49': 'bar', + '50': 'bar', + '51': 'bar', + '52': 'bar', + '53': 'bar', + '54': 'bar', + '55': 'bar', + '56': 'bar', + '57': 'bar', + '58': 'bar', + '59': 'bar', + '60': 
'bar', + '61': 'bar', + '62': 'bar', + '63': 'bar', + '64': 'bar', + '65': 'bar', + '66': 'bar', + '67': 'bar', + '68': 'bar', + '69': 'bar', + '70': 'bar', + '71': 'bar', + '72': 'bar', + '73': 'bar', + '74': 'bar', + '75': 'bar', + '76': 'bar', + '77': 'bar', + '78': 'bar', + '79': 'bar', + '80': 'bar', + '81': 'bar', + '82': 'bar', + '83': 'bar', + '84': 'bar', + '85': 'bar', + '86': 'bar', + '87': 'bar', + '88': 'bar', + '89': 'bar', + '90': 'bar', + '91': 'bar', + '92': 'bar', + '93': 'bar', + '94': 'bar', + '95': 'bar', + '96': 'bar', + '97': 'bar', + '98': 'bar', + '99': 'bar', + '100': 'bar', + '101': 'bar', + '102': 'bar', + '103': 'bar', + '104': 'bar', + '105': 'bar', + '106': 'bar', + '107': 'bar', + '108': 'bar', + '109': 'bar', + '110': 'bar', + '111': 'bar', + '112': 'bar', + '113': 'bar', + '114': 'bar', + '115': 'bar', + '116': 'bar', + '117': 'bar', + '118': 'bar', + '119': 'bar', + '120': 'bar', + '121': 'bar', + '122': 'bar', + '123': 'bar', + '124': 'bar', + '125': 'bar', + '126': 'bar', + '127': 'bar', + '128': 'bar', + '129': 'bar', + '130': 'bar', + '131': 'bar', + '132': 'bar', + '133': 'bar', + '134': 'bar', + '135': 'bar', + '136': 'bar', + '137': 'bar', + '138': 'bar', + '139': 'bar', + }, + type: 'custom', + include_sample_schema: true, + }, + testing_criteria: [ + { + input: [{ content: 'content', role: 'role' }], + labels: ['string'], + model: 'model', + name: 'name', + passing_labels: ['string'], + type: 'label_model', + }, + ], + metadata: { foo: 'string' }, + name: 'name', + share_with_openai: true, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.evals.retrieve('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + 
expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.retrieve('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('update', async () => { + const responsePromise = client.evals.update('eval_id', {}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list', async () => { + const responsePromise = client.evals.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.list( + { after: 'after', limit: 0, order: 'asc', order_by: 'created_at' }, + { path: '/_stainless_unknown_path' }, + ), 
+ ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.evals.del('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.del('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); +}); diff --git a/tests/api-resources/evals/runs/output-items.test.ts b/tests/api-resources/evals/runs/output-items.test.ts new file mode 100644 index 000000000..ff075b404 --- /dev/null +++ b/tests/api-resources/evals/runs/output-items.test.ts @@ -0,0 +1,61 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource outputItems', () => { + test('retrieve', async () => { + const responsePromise = client.evals.runs.outputItems.retrieve('eval_id', 'run_id', 'output_item_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.outputItems.retrieve('eval_id', 'run_id', 'output_item_id', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.evals.runs.outputItems.list('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.outputItems.list('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an 
invalid HTTP method in order to cause an error + await expect( + client.evals.runs.outputItems.list( + 'eval_id', + 'run_id', + { after: 'after', limit: 0, order: 'asc', status: 'fail' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/evals/runs/runs.test.ts b/tests/api-resources/evals/runs/runs.test.ts new file mode 100644 index 000000000..786df0ba1 --- /dev/null +++ b/tests/api-resources/evals/runs/runs.test.ts @@ -0,0 +1,118 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource runs', () => { + test('create: only required params', async () => { + const responsePromise = client.evals.runs.create('eval_id', { + data_source: { source: { content: [{ item: { foo: 'bar' } }], type: 'file_content' }, type: 'jsonl' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.evals.runs.create('eval_id', { + data_source: { + source: { content: [{ item: { foo: 'bar' }, sample: { foo: 'bar' } }], type: 'file_content' }, + type: 'jsonl', + }, + metadata: { foo: 'string' }, + name: 'name', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.evals.runs.retrieve('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const 
response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.retrieve('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.evals.runs.list('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.runs.list('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.list( + 'eval_id', + { after: 'after', limit: 0, order: 'asc', status: 'queued' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.evals.runs.del('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + 
expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.del('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('cancel', async () => { + const responsePromise = client.evals.runs.cancel('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.cancel('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts new file mode 100644 index 000000000..cb8c7a9a1 --- /dev/null +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -0,0 +1,85 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource permissions', () => { + test('create: only required params', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.create( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + { project_ids: ['string'] }, + ); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.fineTuning.checkpoints.permissions.create( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + { project_ids: ['string'] }, + ); + }); + + test('retrieve', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + 
test('retrieve: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.retrieve( + 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + { after: 'after', limit: 0, order: 'ascending', project_id: 'project_id' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.del( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + ); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.del('ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); From a100f0a0e1d336f8a78c8bbd9e3703cda3f0c5d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:44:39 +0000 Subject: [PATCH 494/533] chore(internal): fix examples (#1457) --- .stats.yml | 4 +- .../beta/threads/runs/runs.test.ts | 2 +- .../beta/threads/threads.test.ts | 2 +- tests/api-resources/evals/evals.test.ts | 293 +----------------- tests/api-resources/images.test.ts | 6 +- tests/api-resources/moderations.test.ts | 5 +- 6 files changed, 10 insertions(+), 302 deletions(-) diff --git 
a/.stats.yml b/.stats.yml index ebe07c137..4a82ee242 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-472fe3036ea745365257fe870c0330917fb3153705c2826f49873cd631319b0a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: ef19d36c307306f14f2e1cd5c834a151 +config_hash: d6c61213488683418adb860a9ee1501b diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 13ae89a00..4b2b8030b 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -37,7 +37,7 @@ describe('resource runs', () => { max_completion_tokens: 256, max_prompt_tokens: 256, metadata: { foo: 'string' }, - model: 'gpt-4o', + model: 'string', parallel_tool_calls: true, reasoning_effort: 'low', response_format: 'auto', diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index f26d6ec44..bc92a0c8a 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -121,7 +121,7 @@ describe('resource threads', () => { max_completion_tokens: 256, max_prompt_tokens: 256, metadata: { foo: 'string' }, - model: 'gpt-4o', + model: 'string', parallel_tool_calls: true, response_format: 'auto', stream: false, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts index 3aeb3e15c..fabc2602a 100644 --- a/tests/api-resources/evals/evals.test.ts +++ b/tests/api-resources/evals/evals.test.ts @@ -11,151 +11,7 @@ const client = new OpenAI({ describe('resource evals', () => { test('create: only required params', async () => { const responsePromise = 
client.evals.create({ - data_source_config: { - item_schema: { - '0': 'bar', - '1': 'bar', - '2': 'bar', - '3': 'bar', - '4': 'bar', - '5': 'bar', - '6': 'bar', - '7': 'bar', - '8': 'bar', - '9': 'bar', - '10': 'bar', - '11': 'bar', - '12': 'bar', - '13': 'bar', - '14': 'bar', - '15': 'bar', - '16': 'bar', - '17': 'bar', - '18': 'bar', - '19': 'bar', - '20': 'bar', - '21': 'bar', - '22': 'bar', - '23': 'bar', - '24': 'bar', - '25': 'bar', - '26': 'bar', - '27': 'bar', - '28': 'bar', - '29': 'bar', - '30': 'bar', - '31': 'bar', - '32': 'bar', - '33': 'bar', - '34': 'bar', - '35': 'bar', - '36': 'bar', - '37': 'bar', - '38': 'bar', - '39': 'bar', - '40': 'bar', - '41': 'bar', - '42': 'bar', - '43': 'bar', - '44': 'bar', - '45': 'bar', - '46': 'bar', - '47': 'bar', - '48': 'bar', - '49': 'bar', - '50': 'bar', - '51': 'bar', - '52': 'bar', - '53': 'bar', - '54': 'bar', - '55': 'bar', - '56': 'bar', - '57': 'bar', - '58': 'bar', - '59': 'bar', - '60': 'bar', - '61': 'bar', - '62': 'bar', - '63': 'bar', - '64': 'bar', - '65': 'bar', - '66': 'bar', - '67': 'bar', - '68': 'bar', - '69': 'bar', - '70': 'bar', - '71': 'bar', - '72': 'bar', - '73': 'bar', - '74': 'bar', - '75': 'bar', - '76': 'bar', - '77': 'bar', - '78': 'bar', - '79': 'bar', - '80': 'bar', - '81': 'bar', - '82': 'bar', - '83': 'bar', - '84': 'bar', - '85': 'bar', - '86': 'bar', - '87': 'bar', - '88': 'bar', - '89': 'bar', - '90': 'bar', - '91': 'bar', - '92': 'bar', - '93': 'bar', - '94': 'bar', - '95': 'bar', - '96': 'bar', - '97': 'bar', - '98': 'bar', - '99': 'bar', - '100': 'bar', - '101': 'bar', - '102': 'bar', - '103': 'bar', - '104': 'bar', - '105': 'bar', - '106': 'bar', - '107': 'bar', - '108': 'bar', - '109': 'bar', - '110': 'bar', - '111': 'bar', - '112': 'bar', - '113': 'bar', - '114': 'bar', - '115': 'bar', - '116': 'bar', - '117': 'bar', - '118': 'bar', - '119': 'bar', - '120': 'bar', - '121': 'bar', - '122': 'bar', - '123': 'bar', - '124': 'bar', - '125': 'bar', - '126': 'bar', - '127': 
'bar', - '128': 'bar', - '129': 'bar', - '130': 'bar', - '131': 'bar', - '132': 'bar', - '133': 'bar', - '134': 'bar', - '135': 'bar', - '136': 'bar', - '137': 'bar', - '138': 'bar', - '139': 'bar', - }, - type: 'custom', - }, + data_source_config: { item_schema: { foo: 'bar' }, type: 'custom' }, testing_criteria: [ { input: [{ content: 'content', role: 'role' }], @@ -178,152 +34,7 @@ describe('resource evals', () => { test('create: required and optional params', async () => { const response = await client.evals.create({ - data_source_config: { - item_schema: { - '0': 'bar', - '1': 'bar', - '2': 'bar', - '3': 'bar', - '4': 'bar', - '5': 'bar', - '6': 'bar', - '7': 'bar', - '8': 'bar', - '9': 'bar', - '10': 'bar', - '11': 'bar', - '12': 'bar', - '13': 'bar', - '14': 'bar', - '15': 'bar', - '16': 'bar', - '17': 'bar', - '18': 'bar', - '19': 'bar', - '20': 'bar', - '21': 'bar', - '22': 'bar', - '23': 'bar', - '24': 'bar', - '25': 'bar', - '26': 'bar', - '27': 'bar', - '28': 'bar', - '29': 'bar', - '30': 'bar', - '31': 'bar', - '32': 'bar', - '33': 'bar', - '34': 'bar', - '35': 'bar', - '36': 'bar', - '37': 'bar', - '38': 'bar', - '39': 'bar', - '40': 'bar', - '41': 'bar', - '42': 'bar', - '43': 'bar', - '44': 'bar', - '45': 'bar', - '46': 'bar', - '47': 'bar', - '48': 'bar', - '49': 'bar', - '50': 'bar', - '51': 'bar', - '52': 'bar', - '53': 'bar', - '54': 'bar', - '55': 'bar', - '56': 'bar', - '57': 'bar', - '58': 'bar', - '59': 'bar', - '60': 'bar', - '61': 'bar', - '62': 'bar', - '63': 'bar', - '64': 'bar', - '65': 'bar', - '66': 'bar', - '67': 'bar', - '68': 'bar', - '69': 'bar', - '70': 'bar', - '71': 'bar', - '72': 'bar', - '73': 'bar', - '74': 'bar', - '75': 'bar', - '76': 'bar', - '77': 'bar', - '78': 'bar', - '79': 'bar', - '80': 'bar', - '81': 'bar', - '82': 'bar', - '83': 'bar', - '84': 'bar', - '85': 'bar', - '86': 'bar', - '87': 'bar', - '88': 'bar', - '89': 'bar', - '90': 'bar', - '91': 'bar', - '92': 'bar', - '93': 'bar', - '94': 'bar', - '95': 'bar', - 
'96': 'bar', - '97': 'bar', - '98': 'bar', - '99': 'bar', - '100': 'bar', - '101': 'bar', - '102': 'bar', - '103': 'bar', - '104': 'bar', - '105': 'bar', - '106': 'bar', - '107': 'bar', - '108': 'bar', - '109': 'bar', - '110': 'bar', - '111': 'bar', - '112': 'bar', - '113': 'bar', - '114': 'bar', - '115': 'bar', - '116': 'bar', - '117': 'bar', - '118': 'bar', - '119': 'bar', - '120': 'bar', - '121': 'bar', - '122': 'bar', - '123': 'bar', - '124': 'bar', - '125': 'bar', - '126': 'bar', - '127': 'bar', - '128': 'bar', - '129': 'bar', - '130': 'bar', - '131': 'bar', - '132': 'bar', - '133': 'bar', - '134': 'bar', - '135': 'bar', - '136': 'bar', - '137': 'bar', - '138': 'bar', - '139': 'bar', - }, - type: 'custom', - include_sample_schema: true, - }, + data_source_config: { item_schema: { foo: 'bar' }, type: 'custom', include_sample_schema: true }, testing_criteria: [ { input: [{ content: 'content', role: 'role' }], diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 43e67b030..4f15e20ac 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -25,7 +25,7 @@ describe('resource images', () => { test('createVariation: required and optional params', async () => { const response = await client.images.createVariation({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'dall-e-2', + model: 'string', n: 1, response_format: 'url', size: '1024x1024', @@ -52,7 +52,7 @@ describe('resource images', () => { image: await toFile(Buffer.from('# my file contents'), 'README.md'), prompt: 'A cute baby sea otter wearing a beret', mask: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'dall-e-2', + model: 'string', n: 1, response_format: 'url', size: '1024x1024', @@ -74,7 +74,7 @@ describe('resource images', () => { test('generate: required and optional params', async () => { const response = await client.images.generate({ prompt: 'A cute baby sea otter', - model: 
'dall-e-3', + model: 'string', n: 1, quality: 'standard', response_format: 'url', diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts index 64f9acf3c..107ce9974 100644 --- a/tests/api-resources/moderations.test.ts +++ b/tests/api-resources/moderations.test.ts @@ -21,9 +21,6 @@ describe('resource moderations', () => { }); test('create: required and optional params', async () => { - const response = await client.moderations.create({ - input: 'I want to kill them.', - model: 'omni-moderation-2024-09-26', - }); + const response = await client.moderations.create({ input: 'I want to kill them.', model: 'string' }); }); }); From 58f4559d952f6e56a8f27a6bcaba0acf295623df Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:46:32 +0000 Subject: [PATCH 495/533] chore(internal): skip broken test (#1458) --- .stats.yml | 2 +- .../fine-tuning/checkpoints/permissions.test.ts | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 4a82ee242..c39ce1186 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: d6c61213488683418adb860a9ee1501b +config_hash: 43dc8df20ffec9d1503f91866cb2b7d9 diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts index cb8c7a9a1..e7aceae3e 100644 --- a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -61,7 +61,8 @@ describe('resource permissions', () => { ).rejects.toThrow(OpenAI.NotFoundError); }); - test('del', async () => { + // OpenAPI spec is slightly incorrect 
+ test.skip('del', async () => { const responsePromise = client.fineTuning.checkpoints.permissions.del( 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', ); @@ -74,7 +75,8 @@ describe('resource permissions', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - test('del: request options instead of params are passed correctly', async () => { + // OpenAPI spec is slightly incorrect + test.skip('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( client.fineTuning.checkpoints.permissions.del('ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', { From e9ca1a07691976f41492e3652e1cccea33a9b70b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:47:04 +0000 Subject: [PATCH 496/533] release: 4.93.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 15 +++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 837894bfb..bc3f36214 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.92.1" + ".": "4.93.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 105627c5b..e6a402af8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## 4.93.0 (2025-04-08) + +Full Changelog: [v4.92.1...v4.93.0](https://github.com/openai/openai-node/compare/v4.92.1...v4.93.0) + +### Features + +* **api:** Add evalapi to sdk ([#1456](https://github.com/openai/openai-node/issues/1456)) ([ee917e3](https://github.com/openai/openai-node/commit/ee917e3335fcf44e87a28e54ce8ddfdcdfab1652)) + + +### Chores + +* **internal:** fix examples ([#1457](https://github.com/openai/openai-node/issues/1457)) 
([a3dd0dd](https://github.com/openai/openai-node/commit/a3dd0dde3e8ad9cc7a02cf203d4550f91d31a2ae)) +* **internal:** skip broken test ([#1458](https://github.com/openai/openai-node/issues/1458)) ([4d2f815](https://github.com/openai/openai-node/commit/4d2f815ba5f6c426f9c21f4c3db443166389bbf8)) +* **tests:** improve enum examples ([#1454](https://github.com/openai/openai-node/issues/1454)) ([ecabce2](https://github.com/openai/openai-node/commit/ecabce282a9fb60122310942f3b647dfefae5403)) + ## 4.92.1 (2025-04-07) Full Changelog: [v4.92.0...v4.92.1](https://github.com/openai/openai-node/compare/v4.92.0...v4.92.1) diff --git a/jsr.json b/jsr.json index b986198a1..b5e49671a 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.92.1", + "version": "4.93.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 58c231fda..b9316cbe3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.92.1", + "version": "4.93.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index bfae301de..c385afc4c 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.92.1'; // x-release-please-version +export const VERSION = '4.93.0'; // x-release-please-version From 554c3b142024bec8010474cd7e42b99a209d4daa Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 20:05:16 +0000 Subject: [PATCH 497/533] feat(api): manual updates --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index c39ce1186..d4a4370a7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: 43dc8df20ffec9d1503f91866cb2b7d9 +config_hash: 69e3afd56ccb0f0f822a7a9dc130fc99 From d6b396b94d9ccf64ddfe945069012b6162225fa9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 09:26:35 +0000 Subject: [PATCH 498/533] chore: workaround build errors --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index d4a4370a7..9d8d07c6a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 -config_hash: 69e3afd56ccb0f0f822a7a9dc130fc99 +config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea From 2d452879000c07f3ef4e775e19a527f5f6fa7b4c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 16:43:34 +0000 Subject: [PATCH 499/533] chore(internal): upload builds and expand CI branch coverage (#1460) --- .github/workflows/ci.yml | 37 +++++++++++++++++++++----------- scripts/utils/upload-artifact.sh | 25 +++++++++++++++++++++ 2 files changed, 50 insertions(+), 12 deletions(-) create mode 100755 scripts/utils/upload-artifact.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6e59bb3fa..bd57cd3e6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,20 +1,18 @@ name: CI on: push: - branches: - - master - - update-specs - pull_request: - 
branches: - - master - - next + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'preview-head/**' + - 'preview-base/**' + - 'preview/**' jobs: lint: name: lint runs-on: ubuntu-latest - - steps: - uses: actions/checkout@v4 @@ -32,8 +30,9 @@ jobs: build: name: build runs-on: ubuntu-latest - - + permissions: + contents: read + id-token: write steps: - uses: actions/checkout@v4 @@ -47,10 +46,24 @@ jobs: - name: Check build run: ./scripts/build + + - name: Get GitHub OIDC Token + if: github.repository == 'stainless-sdks/openai-node' + id: github-oidc + uses: actions/github-script@v6 + with: + script: core.setOutput('github_token', await core.getIDToken()); + + - name: Upload tarball + if: github.repository == 'stainless-sdks/openai-node' + env: + URL: https://pkg.stainless.com/s + AUTH: ${{ steps.github-oidc.outputs.github_token }} + SHA: ${{ github.sha }} + run: ./scripts/utils/upload-artifact.sh test: name: test runs-on: ubuntu-latest - steps: - uses: actions/checkout@v4 diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 000000000..0e8490199 --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -exuo pipefail + +RESPONSE=$(curl -X POST "$URL" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(tar -cz dist | curl -v -X PUT \ + -H "Content-Type: application/gzip" \ + --data-binary @- "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/openai-node/$SHA'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi From 
77fc77f7d05d03eafe6c8f002044c60c4bab3c64 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 21:08:51 +0000 Subject: [PATCH 500/533] chore(internal): reduce CI branch coverage --- .github/workflows/ci.yml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd57cd3e6..2ed1eead8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,13 +1,12 @@ name: CI on: push: - branches-ignore: - - 'generated' - - 'codegen/**' - - 'integrated/**' - - 'preview-head/**' - - 'preview-base/**' - - 'preview/**' + branches: + - master + pull_request: + branches: + - master + - next jobs: lint: From 6558b7ca8aef2f98f47a07bc206eb4a789097510 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 14:30:54 +0000 Subject: [PATCH 501/533] chore(client): minor internal fixes --- src/core.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/core.ts b/src/core.ts index ccc677e0e..cfd4eeaa6 100644 --- a/src/core.ts +++ b/src/core.ts @@ -331,10 +331,10 @@ export abstract class APIClient { } buildRequest( - options: FinalRequestOptions, + inputOptions: FinalRequestOptions, { retryCount = 0 }: { retryCount?: number } = {}, ): { req: RequestInit; url: string; timeout: number } { - options = { ...options }; + const options = { ...inputOptions }; const { method, path, query, headers: headers = {} } = options; const body = @@ -362,8 +362,8 @@ export abstract class APIClient { } if (this.idempotencyHeader && method !== 'get') { - if (!options.idempotencyKey) options.idempotencyKey = this.defaultIdempotencyKey(); - headers[this.idempotencyHeader] = options.idempotencyKey; + if (!inputOptions.idempotencyKey) inputOptions.idempotencyKey = this.defaultIdempotencyKey(); + headers[this.idempotencyHeader] = 
inputOptions.idempotencyKey; } const reqHeaders = this.buildHeaders({ options, headers, contentLength, retryCount }); From 840e7de7870835488d4c823d97afdf5d53a739be Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 16:40:57 +0000 Subject: [PATCH 502/533] feat(api): adding gpt-4.1 family of model IDs --- .stats.yml | 4 ++-- src/resources/beta/assistants.ts | 6 ++++++ src/resources/shared.ts | 6 ++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9d8d07c6a..b40485bd0 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-44b20fa9d24544217fe6bb48852037537030a1ad29b202936425110744fe66fb.yml -openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a555f81249cb084f463dcefa4aba069f9341fdaf3dd6ac27d7f237fc90e8f488.yml +openapi_spec_hash: 8e590296cd1a54b9508510b0c7a2c45a config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 0668dcf54..bf957db95 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -1337,6 +1337,12 @@ export interface AssistantUpdateParams { */ model?: | (string & {}) + | 'gpt-4.1' + | 'gpt-4.1-mini' + | 'gpt-4.1-nano' + | 'gpt-4.1-2025-04-14' + | 'gpt-4.1-mini-2025-04-14' + | 'gpt-4.1-nano-2025-04-14' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 3e8ded763..94ef50585 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -9,6 +9,12 @@ export type AllModels = | 'computer-use-preview-2025-03-11'; export type ChatModel = + | 'gpt-4.1' + | 'gpt-4.1-mini' + | 'gpt-4.1-nano' + | 'gpt-4.1-2025-04-14' + | 'gpt-4.1-mini-2025-04-14' + | 
'gpt-4.1-nano-2025-04-14' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' From bd501650af112dd69a2b220beadbb30f42cd9f77 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 16:41:30 +0000 Subject: [PATCH 503/533] release: 4.94.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 17 +++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 21 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index bc3f36214..12b27aa8e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.93.0" + ".": "4.94.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index e6a402af8..d4e7613fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,22 @@ # Changelog +## 4.94.0 (2025-04-14) + +Full Changelog: [v4.93.0...v4.94.0](https://github.com/openai/openai-node/compare/v4.93.0...v4.94.0) + +### Features + +* **api:** adding gpt-4.1 family of model IDs ([bddcbcf](https://github.com/openai/openai-node/commit/bddcbcffdc409ffc8a078a65bbd302cd50b35ff0)) +* **api:** manual updates ([7532f48](https://github.com/openai/openai-node/commit/7532f48ad25c5125064a59985587c20c47a2cbfb)) + + +### Chores + +* **client:** minor internal fixes ([d342f17](https://github.com/openai/openai-node/commit/d342f17e2642da5ee83d080b410dc3c4fe153814)) +* **internal:** reduce CI branch coverage ([a49b94a](https://github.com/openai/openai-node/commit/a49b94a9aebd3e30e1802fff633e1b46cfb81942)) +* **internal:** upload builds and expand CI branch coverage ([#1460](https://github.com/openai/openai-node/issues/1460)) ([7e23bb4](https://github.com/openai/openai-node/commit/7e23bb4f4a09303195b612cc5b393cc41c1d855b)) +* workaround build errors ([913eba8](https://github.com/openai/openai-node/commit/913eba828d116f49fa78b219c62274c1e95c6f17)) + ## 4.93.0 (2025-04-08) Full Changelog: 
[v4.92.1...v4.93.0](https://github.com/openai/openai-node/compare/v4.92.1...v4.93.0) diff --git a/jsr.json b/jsr.json index b5e49671a..891e18dcb 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.93.0", + "version": "4.94.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index b9316cbe3..a399b6cf6 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.93.0", + "version": "4.94.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c385afc4c..4a35de04b 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.93.0'; // x-release-please-version +export const VERSION = '4.94.0'; // x-release-please-version From a0d000094f69db82974de4ba792cd07d4ab59c21 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 16:42:31 +0000 Subject: [PATCH 504/533] feat(api): add o3 and o4-mini model IDs --- .stats.yml | 6 +- src/resources/chat/completions/completions.ts | 55 ++++++++++++++++--- src/resources/completions.ts | 2 + src/resources/responses/responses.ts | 46 +++++++++++++++- src/resources/shared.ts | 19 +++++-- .../api-resources/responses/responses.test.ts | 3 +- 6 files changed, 112 insertions(+), 19 deletions(-) diff --git a/.stats.yml b/.stats.yml index b40485bd0..848c5b5ad 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a555f81249cb084f463dcefa4aba069f9341fdaf3dd6ac27d7f237fc90e8f488.yml -openapi_spec_hash: 8e590296cd1a54b9508510b0c7a2c45a -config_hash: 5ea32de61ff42fcf5e66cff8d9e247ea +openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5633633cc38734869cf7d993f7b549bb8e4d10e0ec45381ec2cd91507cd8eb8f.yml +openapi_spec_hash: c855121b2b2324b99499c9244c21d24d +config_hash: d20837393b73efdb19cd08e04c1cc9a1 diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index f0ef1d0cc..17edac02c 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -140,9 +140,25 @@ export interface ChatCompletion { object: 'chat.completion'; /** - * The service tier used for processing the request. + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ - service_tier?: 'scale' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** * This fingerprint represents the backend configuration that the model runs with. @@ -319,11 +335,11 @@ export interface ChatCompletionAudioParam { * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, * or `pcm16`. 
*/ - format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; + format: 'wav' | 'aac' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** * The voice the model uses to respond. Supported voices are `alloy`, `ash`, - * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. + * `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`. */ voice: | (string & {}) @@ -375,9 +391,25 @@ export interface ChatCompletionChunk { object: 'chat.completion.chunk'; /** - * The service tier used for processing the request. + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ - service_tier?: 'scale' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** * This fingerprint represents the backend configuration that the model runs with. @@ -1114,7 +1146,7 @@ export interface ChatCompletionCreateParamsBase { messages: Array; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. 
Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -1194,7 +1226,7 @@ export interface ChatCompletionCreateParamsBase { * * This value is now deprecated in favor of `max_completion_tokens`, and is not * compatible with - * [o1 series models](https://platform.openai.com/docs/guides/reasoning). + * [o-series models](https://platform.openai.com/docs/guides/reasoning). */ max_tokens?: number | null; @@ -1296,14 +1328,19 @@ export interface ChatCompletionCreateParamsBase { * latency guarentee. * - If set to 'default', the request will be processed using the default service * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). * - When not set, the default behavior is 'auto'. * * When this parameter is set, the response body will include the `service_tier` * utilized. */ - service_tier?: 'auto' | 'default' | null; + service_tier?: 'auto' | 'default' | 'flex' | null; /** + * Not supported with latest reasoning models `o3` and `o4-mini`. + * * Up to 4 sequences where the API will stop generating further tokens. The * returned text will not contain the stop sequence. */ diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 664e39d9d..5cbec5e3c 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -293,6 +293,8 @@ export interface CompletionCreateParamsBase { seed?: number | null; /** + * Not supported with latest reasoning models `o3` and `o4-mini`. + * * Up to 4 sequences where the API will stop generating further tokens. The * returned text will not contain the stop sequence. 
*/ diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index a46c4182c..52dd079fc 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -321,7 +321,7 @@ export interface Response { metadata: Shared.Metadata | null; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -414,6 +414,27 @@ export interface Response { */ reasoning?: Shared.Reasoning | null; + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. + */ + service_tier?: 'auto' | 'default' | 'flex' | null; + /** * The status of the response generation. One of `completed`, `failed`, * `in_progress`, or `incomplete`. 
@@ -2673,7 +2694,7 @@ export interface ResponseCreateParamsBase { input: string | ResponseInput; /** - * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a * wide range of models with different capabilities, performance characteristics, * and price points. Refer to the * [model guide](https://platform.openai.com/docs/models) to browse and compare @@ -2740,6 +2761,27 @@ export interface ResponseCreateParamsBase { */ reasoning?: Shared.Reasoning | null; + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarentee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarentee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. + */ + service_tier?: 'auto' | 'default' | 'flex' | null; + /** * Whether to store the generated model response for later retrieval via API. 
*/ diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 94ef50585..1c0006b18 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -15,6 +15,10 @@ export type ChatModel = | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano-2025-04-14' + | 'o4-mini' + | 'o4-mini-2025-04-16' + | 'o3' + | 'o3-2025-04-16' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o1' @@ -187,13 +191,20 @@ export interface Reasoning { effort?: ReasoningEffort | null; /** - * **computer_use_preview only** + * @deprecated **Deprecated:** use `summary` instead. * * A summary of the reasoning performed by the model. This can be useful for - * debugging and understanding the model's reasoning process. One of `concise` or - * `detailed`. + * debugging and understanding the model's reasoning process. One of `auto`, + * `concise`, or `detailed`. */ - generate_summary?: 'concise' | 'detailed' | null; + generate_summary?: 'auto' | 'concise' | 'detailed' | null; + + /** + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `auto`, + * `concise`, or `detailed`. 
+ */ + summary?: 'auto' | 'concise' | 'detailed' | null; } /** diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts index e10722738..cf7e9cf3c 100644 --- a/tests/api-resources/responses/responses.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -30,7 +30,8 @@ describe('resource responses', () => { metadata: { foo: 'string' }, parallel_tool_calls: true, previous_response_id: 'previous_response_id', - reasoning: { effort: 'low', generate_summary: 'concise' }, + reasoning: { effort: 'low', generate_summary: 'auto', summary: 'auto' }, + service_tier: 'auto', store: true, stream: false, temperature: 1, From b5a5ee43837778adae925f6abcbc395b30ed826b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 16:43:04 +0000 Subject: [PATCH 505/533] release: 4.95.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 12b27aa8e..077a9fd7a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.94.0" + ".": "4.95.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d4e7613fd..3f5f1ef26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.95.0 (2025-04-16) + +Full Changelog: [v4.94.0...v4.95.0](https://github.com/openai/openai-node/compare/v4.94.0...v4.95.0) + +### Features + +* **api:** add o3 and o4-mini model IDs ([4845cd9](https://github.com/openai/openai-node/commit/4845cd9ac17450022f1632ae01397e41a97f1662)) + ## 4.94.0 (2025-04-14) Full Changelog: [v4.93.0...v4.94.0](https://github.com/openai/openai-node/compare/v4.93.0...v4.94.0) diff --git a/jsr.json b/jsr.json index 891e18dcb..d6cacf5f9 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": 
"@openai/openai", - "version": "4.94.0", + "version": "4.95.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index a399b6cf6..512e2ee55 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.94.0", + "version": "4.95.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 4a35de04b..5f581d42d 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.94.0'; // x-release-please-version +export const VERSION = '4.95.0'; // x-release-please-version From aea2d123d200e6a7eae11e66583127270a8db8bf Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 18 Apr 2025 16:49:09 +0100 Subject: [PATCH 506/533] fix(zod): warn on optional field usage (#1469) --- .../zod-to-json-schema/parsers/object.ts | 12 ++++- tests/helpers/zod.test.ts | 52 +++++++++++++++++++ 2 files changed, 62 insertions(+), 2 deletions(-) diff --git a/src/_vendor/zod-to-json-schema/parsers/object.ts b/src/_vendor/zod-to-json-schema/parsers/object.ts index f2120c8fe..25e5db116 100644 --- a/src/_vendor/zod-to-json-schema/parsers/object.ts +++ b/src/_vendor/zod-to-json-schema/parsers/object.ts @@ -39,12 +39,20 @@ export function parseObjectDef(def: ZodObjectDef, refs: Refs) { [propName, propDef], ) => { if (propDef === undefined || propDef._def === undefined) return acc; + const propertyPath = [...refs.currentPath, 'properties', propName]; const parsedDef = parseDef(propDef._def, { ...refs, - currentPath: [...refs.currentPath, 'properties', propName], - propertyPath: [...refs.currentPath, 'properties', propName], + currentPath: propertyPath, + propertyPath, }); if (parsedDef === undefined) return acc; + if (refs.openaiStrictMode && propDef.isOptional() && !propDef.isNullable()) { + console.warn( + `Zod field at \`${propertyPath.join( + '/', + )}\` uses 
\`.optional()\` without \`.nullable()\` which is not supported by the API. See: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required\nThis will become an error in a future version of the SDK.`, + ); + } return { properties: { ...acc.properties, diff --git a/tests/helpers/zod.test.ts b/tests/helpers/zod.test.ts index 493b4c0c8..02d8a7a8f 100644 --- a/tests/helpers/zod.test.ts +++ b/tests/helpers/zod.test.ts @@ -278,4 +278,56 @@ describe('zodResponseFormat', () => { } `); }); + + it('warns on optional fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + required: z.string(), + optional: z.string().optional(), + optional_and_nullable: z.string().optional().nullable(), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledWith( + 'Zod field at `#/definitions/schema/properties/optional` uses `.optional()` without `.nullable()` which is not supported by the API. 
See: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required\nThis will become an error in a future version of the SDK.', + ); + expect(consoleSpy).toHaveBeenCalledTimes(1); + }); + + it('warns on nested optional fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + foo: z.object({ bar: z.array(z.object({ can_be_missing: z.boolean().optional() })) }), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'Zod field at `#/definitions/schema/properties/foo/properties/bar/items/properties/can_be_missing` uses `.optional()`', + ), + ); + expect(consoleSpy).toHaveBeenCalledTimes(1); + }); + + it('does not warn on union nullable fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + union: z.union([z.string(), z.null()]).optional(), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledTimes(0); + }); }); From 2785c1186b528e4ab3a2a7c9282e041aaa4c13f6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Apr 2025 15:50:02 +0000 Subject: [PATCH 507/533] release: 4.95.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 077a9fd7a..2f61d58b0 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.95.0" + ".": "4.95.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f5f1ef26..1f864e203 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 4.95.1 (2025-04-18) + +Full Changelog: [v4.95.0...v4.95.1](https://github.com/openai/openai-node/compare/v4.95.0...v4.95.1) + +### Bug Fixes + +* 
**zod:** warn on optional field usage ([#1469](https://github.com/openai/openai-node/issues/1469)) ([aea2d12](https://github.com/openai/openai-node/commit/aea2d123d200e6a7eae11e66583127270a8db8bf)) + ## 4.95.0 (2025-04-16) Full Changelog: [v4.94.0...v4.95.0](https://github.com/openai/openai-node/compare/v4.94.0...v4.95.0) diff --git a/jsr.json b/jsr.json index d6cacf5f9..8271c8522 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.95.0", + "version": "4.95.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 512e2ee55..76fe7d4d0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.95.0", + "version": "4.95.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 5f581d42d..cd1995322 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.95.0'; // x-release-please-version +export const VERSION = '4.95.1'; // x-release-please-version From bc492ba124cddd545eec7a1199712452c573a7a4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 18 Apr 2025 17:56:50 +0100 Subject: [PATCH 508/533] fix(types): export AssistantStream (#1472) --- src/resources/beta/assistants.ts | 3 +++ src/resources/beta/threads/threads.ts | 2 ++ 2 files changed, 5 insertions(+) diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index bf957db95..00a6ff2cf 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -9,6 +9,7 @@ import * as ThreadsAPI from './threads/threads'; import * as RunsAPI from './threads/runs/runs'; import * as StepsAPI from './threads/runs/steps'; import { CursorPage, type CursorPageParams } from '../../pagination'; +import { AssistantStream } from '../../lib/AssistantStream'; export class Assistants extends 
APIResource { /** @@ -1517,4 +1518,6 @@ export declare namespace Assistants { type AssistantUpdateParams as AssistantUpdateParams, type AssistantListParams as AssistantListParams, }; + + export { AssistantStream }; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 8075ba0ac..1e0077a3f 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -1718,4 +1718,6 @@ export declare namespace Threads { type MessageUpdateParams as MessageUpdateParams, type MessageListParams as MessageListParams, }; + + export { AssistantStream }; } From 939f6365c304c037e0473207d85bbc2f2731b105 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Apr 2025 20:12:57 +0000 Subject: [PATCH 509/533] chore(ci): add timeout thresholds for CI jobs --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2ed1eead8..b0aac41b2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,6 +10,7 @@ on: jobs: lint: + timeout-minutes: 10 name: lint runs-on: ubuntu-latest steps: @@ -27,6 +28,7 @@ jobs: run: ./scripts/lint build: + timeout-minutes: 5 name: build runs-on: ubuntu-latest permissions: @@ -61,6 +63,7 @@ jobs: SHA: ${{ github.sha }} run: ./scripts/utils/upload-artifact.sh test: + timeout-minutes: 10 name: test runs-on: ubuntu-latest steps: @@ -78,6 +81,7 @@ jobs: run: ./scripts/test examples: + timeout-minutes: 10 name: examples runs-on: ubuntu-latest if: github.repository == 'openai/openai-node' From c353531a238863e7f386a66dcee9f02b8115dd47 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 16:30:01 +0000 Subject: [PATCH 510/533] feat(api): adding new image model support --- .stats.yml | 6 +- api.md | 6 +- src/resources/beta/realtime/realtime.ts | 98 +- 
src/resources/beta/threads/threads.ts | 4 +- src/resources/evals/evals.ts | 749 +++++++- src/resources/evals/runs/runs.ts | 1699 ++++++++++++++--- .../fine-tuning/checkpoints/permissions.ts | 6 +- src/resources/images.ts | 202 +- src/resources/responses/responses.ts | 162 ++ tests/api-resources/evals/evals.test.ts | 1 - .../checkpoints/permissions.test.ts | 15 +- tests/api-resources/images.test.ts | 7 +- 12 files changed, 2534 insertions(+), 421 deletions(-) diff --git a/.stats.yml b/.stats.yml index 848c5b5ad..d92408173 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5633633cc38734869cf7d993f7b549bb8e4d10e0ec45381ec2cd91507cd8eb8f.yml -openapi_spec_hash: c855121b2b2324b99499c9244c21d24d -config_hash: d20837393b73efdb19cd08e04c1cc9a1 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml +openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03 +config_hash: b597cd9a31e9e5ec709e2eefb4c54122 diff --git a/api.md b/api.md index 2eb54b34a..49e6548a8 100644 --- a/api.md +++ b/api.md @@ -249,7 +249,7 @@ Methods: - client.fineTuning.checkpoints.permissions.create(fineTunedModelCheckpoint, { ...params }) -> PermissionCreateResponsesPage - client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse -- client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint) -> PermissionDeleteResponse +- client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint, permissionId) -> PermissionDeleteResponse # VectorStores @@ -626,6 +626,10 @@ Types: - ResponseOutputRefusal - ResponseOutputText - ResponseReasoningItem +- ResponseReasoningSummaryPartAddedEvent +- ResponseReasoningSummaryPartDoneEvent +- ResponseReasoningSummaryTextDeltaEvent +- ResponseReasoningSummaryTextDoneEvent 
- ResponseRefusalDeltaEvent - ResponseRefusalDoneEvent - ResponseStatus diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 1c02fdd1a..5012b1edd 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -915,12 +915,34 @@ export type RealtimeClientEvent = | ConversationItemTruncateEvent | InputAudioBufferAppendEvent | InputAudioBufferClearEvent + | RealtimeClientEvent.OutputAudioBufferClear | InputAudioBufferCommitEvent | ResponseCancelEvent | ResponseCreateEvent | SessionUpdateEvent | TranscriptionSessionUpdate; +export namespace RealtimeClientEvent { + /** + * **WebRTC Only:** Emit to cut off the current audio response. This will trigger + * the server to stop generating audio and emit a `output_audio_buffer.cleared` + * event. This event should be preceded by a `response.cancel` client event to stop + * the generation of the current response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferClear { + /** + * The event type, must be `output_audio_buffer.clear`. + */ + type: 'output_audio_buffer.clear'; + + /** + * The unique ID of the client event used for error handling. + */ + event_id?: string; + } +} + /** * The response resource. */ @@ -1174,7 +1196,10 @@ export type RealtimeServerEvent = | ResponseTextDoneEvent | SessionCreatedEvent | SessionUpdatedEvent - | TranscriptionSessionUpdatedEvent; + | TranscriptionSessionUpdatedEvent + | RealtimeServerEvent.OutputAudioBufferStarted + | RealtimeServerEvent.OutputAudioBufferStopped + | RealtimeServerEvent.OutputAudioBufferCleared; export namespace RealtimeServerEvent { /** @@ -1197,6 +1222,77 @@ export namespace RealtimeServerEvent { */ type: 'conversation.item.retrieved'; } + + /** + * **WebRTC Only:** Emitted when the server begins streaming audio to the client. 
+ * This event is emitted after an audio content part has been added + * (`response.content_part.added`) to the response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferStarted { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.started`. + */ + type: 'output_audio_buffer.started'; + } + + /** + * **WebRTC Only:** Emitted when the output audio buffer has been completely + * drained on the server, and no more audio is forthcoming. This event is emitted + * after the full response data has been sent to the client (`response.done`). + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferStopped { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.stopped`. + */ + type: 'output_audio_buffer.stopped'; + } + + /** + * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens + * either in VAD mode when the user has interrupted + * (`input_audio_buffer.speech_started`), or when the client has emitted the + * `output_audio_buffer.clear` event to manually cut off the current audio + * response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferCleared { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. 
+ */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.cleared`. + */ + type: 'output_audio_buffer.cleared'; + } } /** diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 1e0077a3f..2e5ab1cc8 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -686,9 +686,7 @@ export interface ThreadCreateAndRunParamsBase { * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. */ - tools?: Array< - AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool - > | null; + tools?: Array | null; /** * An alternative to sampling with temperature, called nucleus sampling, where the diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 84ff6d1bb..caef7acc0 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; +import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { CreateEvalCompletionsRunDataSource, @@ -107,7 +108,7 @@ export interface EvalCustomDataSourceConfig { * the evaluation. */ export interface EvalLabelModelGrader { - input: Array; + input: Array; /** * The labels to assign to each item in the evaluation. @@ -136,57 +137,43 @@ export interface EvalLabelModelGrader { } export namespace EvalLabelModelGrader { - export interface InputMessage { - content: InputMessage.Content; - + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. 
Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { /** - * The role of the message. One of `user`, `system`, or `developer`. + * Text inputs to the model - can contain template strings. */ - role: 'user' | 'system' | 'developer'; + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; /** - * The type of item, which is always `message`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. */ - type: 'message'; - } - - export namespace InputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `input_text`. - */ - type: 'input_text'; - } - } - - export interface Assistant { - content: Assistant.Content; + role: 'user' | 'assistant' | 'system' | 'developer'; /** - * The role of the message. Must be `assistant` for output. + * The type of the message input. Always `message`. */ - role: 'assistant'; + type?: 'message'; + } + export namespace Input { /** - * The type of item, which is always `message`. + * A text output from the model. */ - type: 'message'; - } - - export namespace Assistant { - export interface Content { + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `output_text`. + * The type of the output text. Always `output_text`. */ type: 'output_text'; } @@ -259,8 +246,8 @@ export interface EvalStringCheckGrader { */ export interface EvalTextSimilarityGrader { /** - * The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, - * `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, + * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. 
*/ evaluation_metric: | 'fuzzy_match' @@ -272,8 +259,7 @@ export interface EvalTextSimilarityGrader { | 'rouge_3' | 'rouge_4' | 'rouge_5' - | 'rouge_l' - | 'cosine'; + | 'rouge_l'; /** * The text being graded. @@ -346,14 +332,131 @@ export interface EvalCreateResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. */ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalCreateResponse.Python + | EvalCreateResponse.ScoreModel + >; +} +export namespace EvalCreateResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. */ - testing_criteria: Array; + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. 
Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -401,14 +504,131 @@ export interface EvalRetrieveResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. */ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalRetrieveResponse.Python + | EvalRetrieveResponse.ScoreModel + >; +} +export namespace EvalRetrieveResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. 
*/ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -456,14 +676,131 @@ export interface EvalUpdateResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. 
+ */ + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalUpdateResponse.Python + | EvalUpdateResponse.ScoreModel + >; +} + +export namespace EvalUpdateResponse { + /** + * A PythonGrader object that runs a python script on the input. */ - share_with_openai: boolean; + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } /** - * A list of testing criteria. + * A ScoreModelGrader object that uses a model to assign a score to the input. */ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. 
+ */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } /** @@ -511,14 +848,131 @@ export interface EvalListResponse { object: 'eval'; /** - * Indicates whether the evaluation is shared with OpenAI. + * A list of testing criteria. */ - share_with_openai: boolean; + testing_criteria: Array< + | EvalLabelModelGrader + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalListResponse.Python + | EvalListResponse.ScoreModel + >; +} +export namespace EvalListResponse { /** - * A list of testing criteria. + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. */ - testing_criteria: Array; + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. 
+ */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } } export interface EvalDeleteResponse { @@ -533,12 +987,18 @@ export interface EvalCreateParams { /** * The configuration for the data source used for the evaluation runs. */ - data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; + data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs; /** * A list of graders for all eval runs in this group. 
*/ - testing_criteria: Array; + testing_criteria: Array< + | EvalCreateParams.LabelModel + | EvalStringCheckGrader + | EvalTextSimilarityGrader + | EvalCreateParams.Python + | EvalCreateParams.ScoreModel + >; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -554,11 +1014,6 @@ export interface EvalCreateParams { * The name of the evaluation. */ name?: string; - - /** - * Indicates whether the evaluation is shared with OpenAI. - */ - share_with_openai?: boolean; } export namespace EvalCreateParams { @@ -572,7 +1027,7 @@ export namespace EvalCreateParams { */ export interface Custom { /** - * The json schema for the run data source items. + * The json schema for each row in the data source. */ item_schema: Record; @@ -582,7 +1037,8 @@ export namespace EvalCreateParams { type: 'custom'; /** - * Whether to include the sample schema in the data source. + * Whether the eval should expect you to populate the sample namespace (ie, by + * generating responses off of your data source) */ include_sample_schema?: boolean; } @@ -592,21 +1048,16 @@ export namespace EvalCreateParams { * completions query. This is usually metadata like `usecase=chatbot` or * `prompt-version=v2`, etc. */ - export interface StoredCompletions { + export interface Logs { /** - * The type of data source. Always `stored_completions`. + * The type of data source. Always `logs`. */ - type: 'stored_completions'; + type: 'logs'; /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format, and - * querying for objects via API or the dashboard. - * - * Keys are strings with a maximum length of 64 characters. Values are strings with - * a maximum length of 512 characters. + * Metadata filters for the logs data source. */ - metadata?: Shared.Metadata | null; + metadata?: Record; } /** @@ -614,7 +1065,11 @@ export namespace EvalCreateParams { * the evaluation. 
*/ export interface LabelModel { - input: Array; + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + input: Array; /** * The labels to classify to each item in the evaluation. @@ -655,57 +1110,157 @@ export namespace EvalCreateParams { role: string; } - export interface InputMessage { - content: InputMessage.Content; + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; /** - * The role of the message. One of `user`, `system`, or `developer`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. */ - role: 'user' | 'system' | 'developer'; + role: 'user' | 'assistant' | 'system' | 'developer'; /** - * The type of item, which is always `message`. + * The type of the message input. Always `message`. */ - type: 'message'; + type?: 'message'; } - export namespace InputMessage { - export interface Content { + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `input_text`. + * The type of the output text. Always `output_text`. */ - type: 'input_text'; + type: 'output_text'; } } + } + + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface Python { + /** + * The name of the grader. 
+ */ + name: string; - export interface OutputMessage { - content: OutputMessage.Content; + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface ScoreModel { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The threshold for the score. + */ + pass_threshold?: number; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; + } + + export namespace ScoreModel { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { /** - * The role of the message. Must be `assistant` for output. + * Text inputs to the model - can contain template strings. */ - role: 'assistant'; + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; /** - * The type of item, which is always `message`. + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. */ - type: 'message'; + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. 
+ */ + type?: 'message'; } - export namespace OutputMessage { - export interface Content { + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { /** - * The text content. + * The text output from the model. */ text: string; /** - * The type of content, which is always `output_text`. + * The type of the output text. Always `output_text`. */ type: 'output_text'; } diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index ca2b7f424..50c07a514 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; import * as Shared from '../../shared'; +import * as ResponsesAPI from '../../responses/responses'; import * as OutputItemsAPI from './output-items'; import { OutputItemListParams, @@ -83,15 +84,6 @@ export class RunListResponsesPage extends CursorPage {} * A CompletionsRunDataSource object describing a model sampling configuration. */ export interface CreateEvalCompletionsRunDataSource { - input_messages: - | CreateEvalCompletionsRunDataSource.Template - | CreateEvalCompletionsRunDataSource.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model: string; - /** * A StoredCompletionsRunDataSource configuration describing a set of filters */ @@ -105,105 +97,19 @@ export interface CreateEvalCompletionsRunDataSource { */ type: 'completions'; + input_messages?: + | CreateEvalCompletionsRunDataSource.Template + | CreateEvalCompletionsRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). 
+ */ + model?: string; + sampling_params?: CreateEvalCompletionsRunDataSource.SamplingParams; } export namespace CreateEvalCompletionsRunDataSource { - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - export interface InputMessage { - content: InputMessage.Content; - - /** - * The role of the message. One of `user`, `system`, or `developer`. - */ - role: 'user' | 'system' | 'developer'; - - /** - * The type of item, which is always `message`. - */ - type: 'message'; - } - - export namespace InputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `input_text`. - */ - type: 'input_text'; - } - } - - export interface OutputMessage { - content: OutputMessage.Content; - - /** - * The role of the message. Must be `assistant` for output. - */ - role: 'assistant'; - - /** - * The type of item, which is always `message`. - */ - type: 'message'; - } - - export namespace OutputMessage { - export interface Content { - /** - * The text content. - */ - text: string; - - /** - * The type of content, which is always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - export interface FileContent { /** * The content of the jsonl file. 
@@ -240,20 +146,25 @@ export namespace CreateEvalCompletionsRunDataSource { * A StoredCompletionsRunDataSource configuration describing a set of filters */ export interface StoredCompletions { + /** + * The type of source. Always `stored_completions`. + */ + type: 'stored_completions'; + /** * An optional Unix timestamp to filter items created after this time. */ - created_after: number | null; + created_after?: number | null; /** * An optional Unix timestamp to filter items created before this time. */ - created_before: number | null; + created_before?: number | null; /** * An optional maximum number of items to return. */ - limit: number | null; + limit?: number | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -263,17 +174,81 @@ export namespace CreateEvalCompletionsRunDataSource { * Keys are strings with a maximum length of 64 characters. Values are strings with * a maximum length of 512 characters. */ - metadata: Shared.Metadata | null; + metadata?: Shared.Metadata | null; /** * An optional model to filter by (e.g., 'gpt-4o'). */ - model: string | null; + model?: string | null; + } + export interface Template { /** - * The type of source. Always `stored_completions`. + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. */ - type: 'stored_completions'; + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Message { + /** + * Text inputs to the model - can contain template strings. 
+ */ + content: string | ResponsesAPI.ResponseInputText | Message.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Message { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; } export interface SamplingParams { @@ -378,7 +353,10 @@ export interface RunCreateResponse { /** * Information about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCreateResponse.Completions; /** * An object representing an error response from the Eval API. @@ -442,162 +420,240 @@ export interface RunCreateResponse { } export namespace RunCreateResponse { - export interface PerModelUsage { - /** - * The number of tokens retrieved from cache. - */ - cached_tokens: number; - + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { /** - * The number of completion tokens generated. + * A EvalResponsesSource object describing a run data source configuration. */ - completion_tokens: number; + source: Completions.FileContent | Completions.FileID | Completions.Responses; /** - * The number of invocations. + * The type of run data source. Always `completions`. 
*/ - invocation_count: number; + type: 'completions'; - /** - * The name of the model. - */ - model_name: string; + input_messages?: Completions.Template | Completions.ItemReference; /** - * The number of prompt tokens used. + * The name of the model to use for generating completions (e.g. "o3-mini"). */ - prompt_tokens: number; + model?: string; - /** - * The total number of tokens used. - */ - total_tokens: number; + sampling_params?: Completions.SamplingParams; } - export interface PerTestingCriteriaResult { - /** - * Number of tests failed for this criteria. - */ - failed: number; + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; - /** - * Number of tests passed for this criteria. - */ - passed: number; + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } - /** - * A description of the testing criteria. - */ - testing_criteria: string; - } + export namespace FileContent { + export interface Content { + item: Record; - /** - * Counters summarizing the outcomes of the evaluation run. - */ - export interface ResultCounts { - /** - * Number of output items that resulted in an error. - */ - errored: number; + sample?: Record; + } + } - /** - * Number of output items that failed to pass the evaluation. - */ - failed: number; + export interface FileID { + /** + * The identifier of the file. + */ + id: string; - /** - * Number of output items that passed the evaluation. - */ - passed: number; + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } /** - * Total number of executed output items. + * A EvalResponsesSource object describing a run data source configuration. */ - total: number; - } -} + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; -/** - * A schema representing an evaluation run. 
- */ -export interface RunRetrieveResponse { - /** - * Unique identifier for the evaluation run. - */ - id: string; + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; - /** - * Unix timestamp (in seconds) when the evaluation run was created. - */ - created_at: number; + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; - /** - * Information about the run's data source. - */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; - /** - * An object representing an error response from the Eval API. - */ - error: EvalAPIError; + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; - /** - * The identifier of the associated evaluation. - */ - eval_id: string; + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; - /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format, and - * querying for objects via API or the dashboard. - * - * Keys are strings with a maximum length of 64 characters. Values are strings with - * a maximum length of 512 characters. - */ - metadata: Shared.Metadata | null; + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; - /** - * The model that is evaluated, if applicable. - */ - model: string; + /** + * The name of the model to find responses for. 
This is a query parameter used to + * select responses. + */ + model?: string | null; - /** - * The name of the evaluation run. - */ - name: string; + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; - /** - * The type of the object. Always "eval.run". - */ - object: 'eval.run'; + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; - /** - * Usage statistics for each model during the evaluation run. - */ - per_model_usage: Array; + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; - /** - * Results per testing criteria applied during the evaluation run. - */ - per_testing_criteria_results: Array; + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } - /** - * The URL to the rendered evaluation run report on the UI dashboard. - */ - report_url: string; + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; - /** - * Counters summarizing the outcomes of the evaluation run. - */ - result_counts: RunRetrieveResponse.ResultCounts; + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } - /** - * The status of the evaluation run. - */ - status: string; -} + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. 
Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } -export namespace RunRetrieveResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -676,7 +732,7 @@ export namespace RunRetrieveResponse { /** * A schema representing an evaluation run. */ -export interface RunListResponse { +export interface RunRetrieveResponse { /** * Unique identifier for the evaluation run. */ @@ -690,7 +746,10 @@ export interface RunListResponse { /** * Information about the run's data source. 
*/ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunRetrieveResponse.Completions; /** * An object representing an error response from the Eval API. @@ -730,12 +789,12 @@ export interface RunListResponse { /** * Usage statistics for each model during the evaluation run. */ - per_model_usage: Array; + per_model_usage: Array; /** * Results per testing criteria applied during the evaluation run. */ - per_testing_criteria_results: Array; + per_testing_criteria_results: Array; /** * The URL to the rendered evaluation run report on the UI dashboard. @@ -745,7 +804,7 @@ export interface RunListResponse { /** * Counters summarizing the outcomes of the evaluation run. */ - result_counts: RunListResponse.ResultCounts; + result_counts: RunRetrieveResponse.ResultCounts; /** * The status of the evaluation run. @@ -753,7 +812,241 @@ export interface RunListResponse { status: string; } -export namespace RunListResponse { +export namespace RunRetrieveResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Completions.FileContent | Completions.FileID | Completions.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: Completions.Template | Completions.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. 
+ */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. 
+ */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. 
+ */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -829,18 +1122,10 @@ export namespace RunListResponse { } } -export interface RunDeleteResponse { - deleted?: boolean; - - object?: string; - - run_id?: string; -} - /** * A schema representing an evaluation run. */ -export interface RunCancelResponse { +export interface RunListResponse { /** * Unique identifier for the evaluation run. */ @@ -854,7 +1139,10 @@ export interface RunCancelResponse { /** * Information about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunListResponse.Completions; /** * An object representing an error response from the Eval API. @@ -894,12 +1182,12 @@ export interface RunCancelResponse { /** * Usage statistics for each model during the evaluation run. */ - per_model_usage: Array; + per_model_usage: Array; /** * Results per testing criteria applied during the evaluation run. */ - per_testing_criteria_results: Array; + per_testing_criteria_results: Array; /** * The URL to the rendered evaluation run report on the UI dashboard. @@ -909,7 +1197,7 @@ export interface RunCancelResponse { /** * Counters summarizing the outcomes of the evaluation run. */ - result_counts: RunCancelResponse.ResultCounts; + result_counts: RunListResponse.ResultCounts; /** * The status of the evaluation run. 
@@ -917,25 +1205,660 @@ export interface RunCancelResponse { status: string; } -export namespace RunCancelResponse { - export interface PerModelUsage { +export namespace RunListResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { /** - * The number of tokens retrieved from cache. + * A EvalResponsesSource object describing a run data source configuration. */ - cached_tokens: number; + source: Completions.FileContent | Completions.FileID | Completions.Responses; /** - * The number of completion tokens generated. + * The type of run data source. Always `completions`. */ - completion_tokens: number; + type: 'completions'; - /** - * The number of invocations. - */ - invocation_count: number; + input_messages?: Completions.Template | Completions.ItemReference; /** - * The name of the model. + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). 
This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. 
Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. 
+ */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunDeleteResponse { + deleted?: boolean; + + object?: string; + + run_id?: string; +} + +/** + * A schema representing an evaluation run. + */ +export interface RunCancelResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCancelResponse.Completions; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. 
Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCancelResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCancelResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Completions { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Completions.FileContent | Completions.FileID | Completions.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: Completions.Template | Completions.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Completions.SamplingParams; + } + + export namespace Completions { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. 
+ */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. 
May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. 
+ */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. */ model_name: string; @@ -997,7 +1920,10 @@ export interface RunCreateParams { /** * Details about the run's data source. */ - data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource; + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCreateParams.CreateEvalResponsesRunDataSource; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1015,6 +1941,247 @@ export interface RunCreateParams { name?: string; } +export namespace RunCreateParams { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface CreateEvalResponsesRunDataSource { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: + | CreateEvalResponsesRunDataSource.FileContent + | CreateEvalResponsesRunDataSource.FileID + | CreateEvalResponsesRunDataSource.Responses; + + /** + * The type of run data source. Always `completions`. + */ + type: 'completions'; + + input_messages?: + | CreateEvalResponsesRunDataSource.Template + | CreateEvalResponsesRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). 
+ */ + model?: string; + + sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; + } + + export namespace CreateEvalResponsesRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Whether to allow parallel tool calls. This is a query parameter used to select + * responses. + */ + allow_parallel_tool_calls?: boolean | null; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional search string for instructions. This is a query parameter used to + * select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. 
This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. 
Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } +} + export interface RunListParams extends CursorPageParams { /** * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for @@ -1023,8 +2190,8 @@ export interface RunListParams extends CursorPageParams { order?: 'asc' | 'desc'; /** - * Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" | - * "canceled". + * Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + * | `canceled`. 
*/ status?: 'queued' | 'in_progress' | 'completed' | 'canceled' | 'failed'; } diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts index 500c3de81..e808b2001 100644 --- a/src/resources/fine-tuning/checkpoints/permissions.ts +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -61,9 +61,13 @@ export class Permissions extends APIResource { */ del( fineTunedModelCheckpoint: string, + permissionId: string, options?: Core.RequestOptions, ): Core.APIPromise { - return this._client.delete(`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, options); + return this._client.delete( + `/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions/${permissionId}`, + options, + ); } } diff --git a/src/resources/images.ts b/src/resources/images.ts index 8e1c6d92e..de1882d30 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -5,7 +5,7 @@ import * as Core from '../core'; export class Images extends APIResource { /** - * Creates a variation of a given image. + * Creates a variation of a given image. This endpoint only supports `dall-e-2`. */ createVariation( body: ImageCreateVariationParams, @@ -15,7 +15,8 @@ export class Images extends APIResource { } /** - * Creates an edited or extended image given an original image and a prompt. + * Creates an edited or extended image given one or more source images and a + * prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. */ edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/images/edits', Core.multipartFormRequestOptions({ body, ...options })); @@ -23,6 +24,7 @@ export class Images extends APIResource { /** * Creates an image given a prompt. + * [Learn more](https://platform.openai.com/docs/guides/images). 
*/ generate(body: ImageGenerateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/images/generations', { body, ...options }); @@ -30,33 +32,93 @@ export class Images extends APIResource { } /** - * Represents the url or the content of an image generated by the OpenAI API. + * Represents the content or the URL of an image generated by the OpenAI API. */ export interface Image { /** - * The base64-encoded JSON of the generated image, if `response_format` is - * `b64_json`. + * The base64-encoded JSON of the generated image. Default value for `gpt-image-1`, + * and only present if `response_format` is set to `b64_json` for `dall-e-2` and + * `dall-e-3`. */ b64_json?: string; /** - * The prompt that was used to generate the image, if there was any revision to the - * prompt. + * For `dall-e-3` only, the revised prompt that was used to generate the image. */ revised_prompt?: string; /** - * The URL of the generated image, if `response_format` is `url` (default). + * When using `dall-e-2` or `dall-e-3`, the URL of the generated image if + * `response_format` is set to `url` (default value). Unsupported for + * `gpt-image-1`. */ url?: string; } -export type ImageModel = 'dall-e-2' | 'dall-e-3'; +export type ImageModel = 'dall-e-2' | 'dall-e-3' | 'gpt-image-1'; +/** + * The response from the image generation endpoint. + */ export interface ImagesResponse { + /** + * The Unix timestamp (in seconds) of when the image was created. + */ created: number; - data: Array; + /** + * The list of generated images. + */ + data?: Array; + + /** + * For `gpt-image-1` only, the token usage information for the image generation. + */ + usage?: ImagesResponse.Usage; +} + +export namespace ImagesResponse { + /** + * For `gpt-image-1` only, the token usage information for the image generation. + */ + export interface Usage { + /** + * The number of tokens (images and text) in the input prompt. 
+ */ + input_tokens: number; + + /** + * The input tokens detailed information for the image generation. + */ + input_tokens_details: Usage.InputTokensDetails; + + /** + * The number of image tokens in the output image. + */ + output_tokens: number; + + /** + * The total number of tokens (images and text) used for the image generation. + */ + total_tokens: number; + } + + export namespace Usage { + /** + * The input tokens detailed information for the image generation. + */ + export interface InputTokensDetails { + /** + * The number of image tokens in the input prompt. + */ + image_tokens: number; + + /** + * The number of text tokens in the input prompt. + */ + text_tokens: number; + } + } } export interface ImageCreateVariationParams { @@ -73,8 +135,7 @@ export interface ImageCreateVariationParams { model?: (string & {}) | ImageModel | null; /** - * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - * `n=1` is supported. + * The number of images to generate. Must be between 1 and 10. */ n?: number | null; @@ -101,27 +162,31 @@ export interface ImageCreateVariationParams { export interface ImageEditParams { /** - * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - * is not provided, image must have transparency, which will be used as the mask. + * The image(s) to edit. Must be a supported image file or an array of images. For + * `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + * 25MB. For `dall-e-2`, you can only provide one image, and it should be a square + * `png` file less than 4MB. */ - image: Core.Uploadable; + image: Core.Uploadable | Array; /** * A text description of the desired image(s). The maximum length is 1000 - * characters. + * characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. */ prompt: string; /** * An additional image whose fully transparent areas (e.g. where alpha is zero) - * indicate where `image` should be edited. 
Must be a valid PNG file, less than + * indicate where `image` should be edited. If there are multiple images provided, + * the mask will be applied on the first image. Must be a valid PNG file, less than * 4MB, and have the same dimensions as `image`. */ mask?: Core.Uploadable; /** - * The model to use for image generation. Only `dall-e-2` is supported at this - * time. + * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + * supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + * is used. */ model?: (string & {}) | ImageModel | null; @@ -130,16 +195,25 @@ export interface ImageEditParams { */ n?: number | null; + /** + * The quality of the image that will be generated. `high`, `medium` and `low` are + * only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + * Defaults to `auto`. + */ + quality?: 'standard' | 'low' | 'medium' | 'high' | 'auto' | null; + /** * The format in which the generated images are returned. Must be one of `url` or * `b64_json`. URLs are only valid for 60 minutes after the image has been - * generated. + * generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + * will always return base64-encoded images. */ response_format?: 'url' | 'b64_json' | null; /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or - * `1024x1024`. + * The size of the generated images. Must be one of `1024x1024`, `1536x1024` + * (landscape), `1024x1536` (portrait), or `auto` (default value) for + * `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. */ size?: '256x256' | '512x512' | '1024x1024' | null; @@ -153,16 +227,36 @@ export interface ImageEditParams { export interface ImageGenerateParams { /** - * A text description of the desired image(s). The maximum length is 1000 - * characters for `dall-e-2` and 4000 characters for `dall-e-3`. + * A text description of the desired image(s). 
The maximum length is 32000 + * characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + * for `dall-e-3`. */ prompt: string; /** - * The model to use for image generation. + * Allows to set transparency for the background of the generated image(s). This + * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + * `opaque` or `auto` (default value). When `auto` is used, the model will + * automatically determine the best background for the image. + * + * If `transparent`, the output format needs to support transparency, so it should + * be set to either `png` (default value) or `webp`. + */ + background?: 'transparent' | 'opaque' | 'auto' | null; + + /** + * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + * `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + * `gpt-image-1` is used. */ model?: (string & {}) | ImageModel | null; + /** + * Control the content-moderation level for images generated by `gpt-image-1`. Must + * be either `low` for less restrictive filtering or `auto` (default value). + */ + moderation?: 'low' | 'auto' | null; + /** * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only * `n=1` is supported. @@ -170,31 +264,59 @@ export interface ImageGenerateParams { n?: number | null; /** - * The quality of the image that will be generated. `hd` creates images with finer - * details and greater consistency across the image. This param is only supported - * for `dall-e-3`. + * The compression level (0-100%) for the generated images. This parameter is only + * supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + * defaults to 100. */ - quality?: 'standard' | 'hd'; + output_compression?: number | null; /** - * The format in which the generated images are returned. Must be one of `url` or - * `b64_json`. URLs are only valid for 60 minutes after the image has been - * generated. 
+ * The format in which the generated images are returned. This parameter is only + * supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + */ + output_format?: 'png' | 'jpeg' | 'webp' | null; + + /** + * The quality of the image that will be generated. + * + * - `auto` (default value) will automatically select the best quality for the + * given model. + * - `high`, `medium` and `low` are supported for `gpt-image-1`. + * - `hd` and `standard` are supported for `dall-e-3`. + * - `standard` is the only option for `dall-e-2`. + */ + quality?: 'standard' | 'hd' | 'low' | 'medium' | 'high' | 'auto' | null; + + /** + * The format in which generated images with `dall-e-2` and `dall-e-3` are + * returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + * after the image has been generated. This parameter isn't supported for + * `gpt-image-1` which will always return base64-encoded images. */ response_format?: 'url' | 'b64_json' | null; /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or - * `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - * `1024x1792` for `dall-e-3` models. + * The size of the generated images. Must be one of `1024x1024`, `1536x1024` + * (landscape), `1024x1536` (portrait), or `auto` (default value) for + * `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + * one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. */ - size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792' | null; + size?: + | 'auto' + | '1024x1024' + | '1536x1024' + | '1024x1536' + | '256x256' + | '512x512' + | '1792x1024' + | '1024x1792' + | null; /** - * The style of the generated images. Must be one of `vivid` or `natural`. Vivid - * causes the model to lean towards generating hyper-real and dramatic images. - * Natural causes the model to produce more natural, less hyper-real looking - * images. 
This param is only supported for `dall-e-3`. + * The style of the generated images. This parameter is only supported for + * `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + * towards generating hyper-real and dramatic images. Natural causes the model to + * produce more natural, less hyper-real looking images. */ style?: 'vivid' | 'natural' | null; diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 52dd079fc..771b8daf2 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -2158,6 +2158,160 @@ export namespace ResponseReasoningItem { } } +/** + * Emitted when a new reasoning summary part is added. + */ +export interface ResponseReasoningSummaryPartAddedEvent { + /** + * The ID of the item this summary part is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary part is associated with. + */ + output_index: number; + + /** + * The summary part that was added. + */ + part: ResponseReasoningSummaryPartAddedEvent.Part; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_part.added`. + */ + type: 'response.reasoning_summary_part.added'; +} + +export namespace ResponseReasoningSummaryPartAddedEvent { + /** + * The summary part that was added. + */ + export interface Part { + /** + * The text of the summary part. + */ + text: string; + + /** + * The type of the summary part. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a reasoning summary part is completed. + */ +export interface ResponseReasoningSummaryPartDoneEvent { + /** + * The ID of the item this summary part is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary part is associated with. + */ + output_index: number; + + /** + * The completed summary part. 
+ */ + part: ResponseReasoningSummaryPartDoneEvent.Part; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_part.done`. + */ + type: 'response.reasoning_summary_part.done'; +} + +export namespace ResponseReasoningSummaryPartDoneEvent { + /** + * The completed summary part. + */ + export interface Part { + /** + * The text of the summary part. + */ + text: string; + + /** + * The type of the summary part. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a delta is added to a reasoning summary text. + */ +export interface ResponseReasoningSummaryTextDeltaEvent { + /** + * The text delta that was added to the summary. + */ + delta: string; + + /** + * The ID of the item this summary text delta is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary text delta is associated with. + */ + output_index: number; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_text.delta`. + */ + type: 'response.reasoning_summary_text.delta'; +} + +/** + * Emitted when a reasoning summary text is completed. + */ +export interface ResponseReasoningSummaryTextDoneEvent { + /** + * The ID of the item this summary text is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary text is associated with. + */ + output_index: number; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The full text of the completed reasoning summary. + */ + text: string; + + /** + * The type of the event. Always `response.reasoning_summary_text.done`. + */ + type: 'response.reasoning_summary_text.done'; +} + /** * Emitted when there is a partial refusal text. 
*/ @@ -2252,6 +2406,10 @@ export type ResponseStreamEvent = | ResponseIncompleteEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent + | ResponseReasoningSummaryPartAddedEvent + | ResponseReasoningSummaryPartDoneEvent + | ResponseReasoningSummaryTextDeltaEvent + | ResponseReasoningSummaryTextDoneEvent | ResponseRefusalDeltaEvent | ResponseRefusalDoneEvent | ResponseTextAnnotationDeltaEvent @@ -2967,6 +3125,10 @@ export declare namespace Responses { type ResponseOutputRefusal as ResponseOutputRefusal, type ResponseOutputText as ResponseOutputText, type ResponseReasoningItem as ResponseReasoningItem, + type ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent, + type ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent, + type ResponseReasoningSummaryTextDeltaEvent as ResponseReasoningSummaryTextDeltaEvent, + type ResponseReasoningSummaryTextDoneEvent as ResponseReasoningSummaryTextDoneEvent, type ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent, type ResponseRefusalDoneEvent as ResponseRefusalDoneEvent, type ResponseStatus as ResponseStatus, diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts index fabc2602a..45d1c4f9b 100644 --- a/tests/api-resources/evals/evals.test.ts +++ b/tests/api-resources/evals/evals.test.ts @@ -47,7 +47,6 @@ describe('resource evals', () => { ], metadata: { foo: 'string' }, name: 'name', - share_with_openai: true, }); }); diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts index e7aceae3e..1e4b40a94 100644 --- a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -61,10 +61,10 @@ describe('resource permissions', () => { ).rejects.toThrow(OpenAI.NotFoundError); }); - // OpenAPI spec is slightly incorrect - test.skip('del', async () => { + test('del', 
async () => { const responsePromise = client.fineTuning.checkpoints.permissions.del( 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', ); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -75,13 +75,14 @@ describe('resource permissions', () => { expect(dataAndResponse.response).toBe(rawResponse); }); - // OpenAPI spec is slightly incorrect - test.skip('del: request options instead of params are passed correctly', async () => { + test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.fineTuning.checkpoints.permissions.del('ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', { - path: '/_stainless_unknown_path', - }), + client.fineTuning.checkpoints.permissions.del( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', + { path: '/_stainless_unknown_path' }, + ), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 4f15e20ac..e9b460254 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -54,6 +54,7 @@ describe('resource images', () => { mask: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'string', n: 1, + quality: 'high', response_format: 'url', size: '1024x1024', user: 'user-1234', @@ -74,9 +75,13 @@ describe('resource images', () => { test('generate: required and optional params', async () => { const response = await client.images.generate({ prompt: 'A cute baby sea otter', + background: 'transparent', model: 'string', + moderation: 'low', n: 1, - quality: 'standard', + output_compression: 100, + output_format: 'png', + quality: 'medium', response_format: 'url', size: '1024x1024', style: 'vivid', From 
f99e7c3ca9348a8611665b54a68a9e377dc61ea9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 16:32:08 +0000 Subject: [PATCH 511/533] release: 4.96.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2f61d58b0..5b0015f5b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.95.1" + ".": "4.96.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f864e203..47717a4eb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 4.96.0 (2025-04-23) + +Full Changelog: [v4.95.1...v4.96.0](https://github.com/openai/openai-node/compare/v4.95.1...v4.96.0) + +### Features + +* **api:** adding new image model support ([a00d331](https://github.com/openai/openai-node/commit/a00d33190edd08df7d9c088c00ab7b77673f88ba)) + + +### Bug Fixes + +* **types:** export AssistantStream ([#1472](https://github.com/openai/openai-node/issues/1472)) ([626c844](https://github.com/openai/openai-node/commit/626c844a758a68ffbff48873d4773be2e3868952)) + + +### Chores + +* **ci:** add timeout thresholds for CI jobs ([e465063](https://github.com/openai/openai-node/commit/e46506351097f1de39c866c28b6ec20fa724fc36)) + ## 4.95.1 (2025-04-18) Full Changelog: [v4.95.0...v4.95.1](https://github.com/openai/openai-node/compare/v4.95.0...v4.95.1) diff --git a/jsr.json b/jsr.json index 8271c8522..6b574ce15 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.95.1", + "version": "4.96.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 76fe7d4d0..7b4e86f8e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - 
"version": "4.95.1", + "version": "4.96.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index cd1995322..1215a5e79 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.95.1'; // x-release-please-version +export const VERSION = '4.96.0'; // x-release-please-version From ead76fc6429ac52a1c8b008ac5c0afcefaa0bae5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 17:55:23 +0000 Subject: [PATCH 512/533] chore(ci): run on more branches and use depot runners --- .github/workflows/ci.yml | 20 ++++++++++---------- .github/workflows/publish-jsr.yml | 2 +- .github/workflows/publish-npm.yml | 2 +- .github/workflows/release-doctor.yml | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b0aac41b2..9b293f4c2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,18 +1,18 @@ name: CI on: push: - branches: - - master - pull_request: - branches: - - master - - next + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: timeout-minutes: 10 name: lint - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - uses: actions/checkout@v4 @@ -30,7 +30,7 @@ jobs: build: timeout-minutes: 5 name: build - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: contents: read id-token: write @@ -65,7 +65,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 steps: - uses: actions/checkout@v4 @@ -83,7 +83,7 @@ jobs: examples: timeout-minutes: 10 name: examples - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 if: github.repository == 'openai/openai-node' steps: diff --git a/.github/workflows/publish-jsr.yml 
b/.github/workflows/publish-jsr.yml index 1e46d6bfb..efb18bb16 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -7,7 +7,7 @@ on: jobs: publish: name: publish - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 permissions: contents: read id-token: write diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 5a3711b53..cf1d07e09 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -7,7 +7,7 @@ on: jobs: publish: name: publish - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 environment: publish steps: diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 37bc09e80..1c794642c 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -8,7 +8,7 @@ on: jobs: release_doctor: name: release doctor - runs-on: ubuntu-latest + runs-on: depot-ubuntu-24.04 environment: publish if: github.repository == 'openai/openai-node' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') From 214da398c76f46d40994665f3ca7e10e203e9579 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 19:58:52 +0000 Subject: [PATCH 513/533] chore(ci): only use depot for staging repos --- .github/workflows/ci.yml | 8 ++++---- .github/workflows/publish-jsr.yml | 2 +- .github/workflows/publish-npm.yml | 2 +- .github/workflows/release-doctor.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9b293f4c2..49a043930 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ jobs: lint: timeout-minutes: 10 name: lint - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 
'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -30,7 +30,7 @@ jobs: build: timeout-minutes: 5 name: build - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} permissions: contents: read id-token: write @@ -65,7 +65,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -83,7 +83,7 @@ jobs: examples: timeout-minutes: 10 name: examples - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} if: github.repository == 'openai/openai-node' steps: diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml index efb18bb16..1e46d6bfb 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -7,7 +7,7 @@ on: jobs: publish: name: publish - runs-on: depot-ubuntu-24.04 + runs-on: ubuntu-latest permissions: contents: read id-token: write diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index cf1d07e09..5a3711b53 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -7,7 +7,7 @@ on: jobs: publish: name: publish - runs-on: depot-ubuntu-24.04 + runs-on: ubuntu-latest environment: publish steps: diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 1c794642c..37bc09e80 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -8,7 +8,7 @@ on: jobs: release_doctor: name: release doctor - runs-on: depot-ubuntu-24.04 + runs-on: ubuntu-latest environment: publish if: github.repository == 'openai/openai-node' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || 
github.head_ref == 'next') From 3e7c92c8a76c1f747610d63d9d69a88b796ee9fc Mon Sep 17 00:00:00 2001 From: Isaac Batista Date: Mon, 28 Apr 2025 12:31:09 -0300 Subject: [PATCH 514/533] fix(types): export ParseableToolsParams (#1486) --- src/lib/ResponsesParser.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts index 8d762d5bb..c64c6ffa0 100644 --- a/src/lib/ResponsesParser.ts +++ b/src/lib/ResponsesParser.ts @@ -14,7 +14,7 @@ import { } from '../resources/responses/responses'; import { type AutoParseableTextFormat, isAutoParsableResponseFormat } from '../lib/parser'; -type ParseableToolsParams = Array | ChatCompletionTool | null; +export type ParseableToolsParams = Array | ChatCompletionTool | null; export type ResponseCreateParamsWithTools = ResponseCreateParamsBase & { tools?: ParseableToolsParams; From 593fea4f86d46035540f5a34314d6f4b31960dd2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 29 Apr 2025 05:07:54 +0000 Subject: [PATCH 515/533] release: 4.96.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5b0015f5b..20d7ece71 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.96.0" + ".": "4.96.1" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 47717a4eb..3a78e5928 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.96.1 (2025-04-29) + +Full Changelog: [v4.96.0...v4.96.1](https://github.com/openai/openai-node/compare/v4.96.0...v4.96.1) + +### Bug Fixes + +* **types:** export ParseableToolsParams ([#1486](https://github.com/openai/openai-node/issues/1486)) 
([eb055b2](https://github.com/openai/openai-node/commit/eb055b26ce90e5fe1b101a95a4390956d519e168)) + + +### Chores + +* **ci:** only use depot for staging repos ([e80af47](https://github.com/openai/openai-node/commit/e80af47590056baa8f456e8d60c37f0d00ff08c4)) +* **ci:** run on more branches and use depot runners ([b04a801](https://github.com/openai/openai-node/commit/b04a801d0356105eacddbb4d10f4359719585dd6)) + ## 4.96.0 (2025-04-23) Full Changelog: [v4.95.1...v4.96.0](https://github.com/openai/openai-node/compare/v4.95.1...v4.96.0) diff --git a/jsr.json b/jsr.json index 6b574ce15..73ea2185e 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.96.0", + "version": "4.96.1", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 7b4e86f8e..54046d4f2 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.96.0", + "version": "4.96.1", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 1215a5e79..45539224e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.96.0'; // x-release-please-version +export const VERSION = '4.96.1'; // x-release-please-version From 37ab638baee4f68f3149f4a4f96a136f32966739 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 29 Apr 2025 17:08:58 +0000 Subject: [PATCH 516/533] release: 4.96.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 20d7ece71..88f780d30 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.96.1" + 
".": "4.96.2" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a78e5928..d724d8922 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 4.96.2 (2025-04-29) + +Full Changelog: [v4.96.1...v4.96.2](https://github.com/openai/openai-node/compare/v4.96.1...v4.96.2) + +### Bug Fixes + +* **types:** export ParseableToolsParams ([#1486](https://github.com/openai/openai-node/issues/1486)) ([3e7c92c](https://github.com/openai/openai-node/commit/3e7c92c8a76c1f747610d63d9d69a88b796ee9fc)) + + +### Chores + +* **ci:** only use depot for staging repos ([214da39](https://github.com/openai/openai-node/commit/214da398c76f46d40994665f3ca7e10e203e9579)) +* **ci:** run on more branches and use depot runners ([ead76fc](https://github.com/openai/openai-node/commit/ead76fc6429ac52a1c8b008ac5c0afcefaa0bae5)) + ## 4.96.1 (2025-04-29) Full Changelog: [v4.96.0...v4.96.1](https://github.com/openai/openai-node/compare/v4.96.0...v4.96.1) diff --git a/jsr.json b/jsr.json index 73ea2185e..8eca06e74 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.96.1", + "version": "4.96.2", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 54046d4f2..d563394c0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.96.1", + "version": "4.96.2", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 45539224e..1674d74fe 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.96.1'; // x-release-please-version +export const VERSION = '4.96.2'; // x-release-please-version From 0989ddcfd5ed0a149bbc67d61f93e0f49c397c72 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Apr 2025 15:45:48 +0000 Subject: [PATCH 517/533] 
docs(readme): fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8515c81ed..bbf72226a 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,7 @@ async function main() { main(); ``` -Error codes are as followed: +Error codes are as follows: | Status Code | Error Type | | ----------- | -------------------------- | From 995075b632051b5bb33c0381056107b2fe93931e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Apr 2025 22:11:59 +0000 Subject: [PATCH 518/533] chore(docs): add missing deprecation warnings --- src/resources/chat/completions/completions.ts | 8 ++++---- src/resources/fine-tuning/jobs/jobs.ts | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 17edac02c..251020337 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -1169,7 +1169,7 @@ export interface ChatCompletionCreateParamsBase { frequency_penalty?: number | null; /** - * Deprecated in favor of `tool_choice`. + * @deprecated Deprecated in favor of `tool_choice`. * * Controls which (if any) function is called by the model. * @@ -1187,7 +1187,7 @@ export interface ChatCompletionCreateParamsBase { function_call?: 'none' | 'auto' | ChatCompletionFunctionCallOption; /** - * Deprecated in favor of `tools`. + * @deprecated Deprecated in favor of `tools`. * * A list of functions the model may generate JSON inputs for. */ @@ -1220,8 +1220,8 @@ export interface ChatCompletionCreateParamsBase { max_completion_tokens?: number | null; /** - * The maximum number of [tokens](/tokenizer) that can be generated in the chat - * completion. This value can be used to control + * @deprecated The maximum number of [tokens](/tokenizer) that can be generated in + * the chat completion. 
This value can be used to control * [costs](https://openai.com/api/pricing/) for text generated via API. * * This value is now deprecated in favor of `max_completion_tokens`, and is not diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 9be03c302..2198e8174 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -466,8 +466,9 @@ export interface JobCreateParams { training_file: string; /** - * The hyperparameters used for the fine-tuning job. This value is now deprecated - * in favor of `method`, and should be passed in under the `method` parameter. + * @deprecated The hyperparameters used for the fine-tuning job. This value is now + * deprecated in favor of `method`, and should be passed in under the `method` + * parameter. */ hyperparameters?: JobCreateParams.Hyperparameters; From dfbdc65d3ed17f0063d02906239371b88e04e5fd Mon Sep 17 00:00:00 2001 From: mini-peanut Date: Fri, 2 May 2025 09:00:11 +0800 Subject: [PATCH 519/533] docs: fix "procesing" -> "processing" in realtime examples (#1406) --- examples/azure/realtime/websocket.ts | 4 ++-- examples/azure/realtime/ws.ts | 4 ++-- examples/realtime/websocket.ts | 4 ++-- examples/realtime/ws.ts | 4 ++-- realtime.md | 7 +++---- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/examples/azure/realtime/websocket.ts b/examples/azure/realtime/websocket.ts index bec74e654..4175b4a71 100644 --- a/examples/azure/realtime/websocket.ts +++ b/examples/azure/realtime/websocket.ts @@ -40,7 +40,7 @@ async function main() { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -57,4 +57,4 @@ async function main() { rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); } -main(); +main(); \ No newline at end 
of file diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts index 6ab7b742a..e86a79092 100644 --- a/examples/azure/realtime/ws.ts +++ b/examples/azure/realtime/ws.ts @@ -40,7 +40,7 @@ async function main() { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -57,4 +57,4 @@ async function main() { rt.socket.on('close', () => console.log('\nConnection closed!')); } -main(); +main(); \ No newline at end of file diff --git a/examples/realtime/websocket.ts b/examples/realtime/websocket.ts index 0da131bc3..f1c46dd41 100644 --- a/examples/realtime/websocket.ts +++ b/examples/realtime/websocket.ts @@ -28,7 +28,7 @@ async function main() { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -45,4 +45,4 @@ async function main() { rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); } -main(); +main(); \ No newline at end of file diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts index 08c6fbcb6..1ce6b2045 100644 --- a/examples/realtime/ws.ts +++ b/examples/realtime/ws.ts @@ -28,7 +28,7 @@ async function main() { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -45,4 +45,4 @@ async function main() { rt.socket.on('close', () => console.log('\nConnection closed!')); } -main(); +main(); \ No newline at end of file diff --git a/realtime.md b/realtime.md index 2fcd17e9e..7e8d84a3c 100644 --- a/realtime.md +++ 
b/realtime.md @@ -39,7 +39,7 @@ rt.socket.on('open', () => { rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); @@ -80,8 +80,7 @@ It is **highly recommended** that you register an `error` event listener and han const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); rt.on('error', (err) => { // in a real world scenario this should be logged somewhere as you - // likely want to continue procesing events regardless of any errors + // likely want to continue processing events regardless of any errors throw err; }); -``` - +``` \ No newline at end of file From 31cd88fae84f630c8e86e1acab6c4cd9283c886c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 19:09:26 +0000 Subject: [PATCH 520/533] feat(api): add image sizes, reasoning encryption --- .stats.yml | 6 +-- src/resources/audio/speech.ts | 2 +- src/resources/images.ts | 24 +++++++-- src/resources/responses/responses.ts | 79 ++++++++++++++++++---------- tests/api-resources/images.test.ts | 1 + 5 files changed, 74 insertions(+), 38 deletions(-) diff --git a/.stats.yml b/.stats.yml index d92408173..0c8278866 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml -openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03 -config_hash: b597cd9a31e9e5ec709e2eefb4c54122 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0ee6b36cf3cc278cef4199a6aec5f7d530a6c1f17a74830037e96d50ca1edc50.yml +openapi_spec_hash: e8ec5f46bc0655b34f292422d58a60f6 +config_hash: d9b6b6e6bc85744663e300eebc482067 diff --git 
a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 4b99ee5f4..e218c8299 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -66,7 +66,7 @@ export interface SpeechCreateParams { /** * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - * the default. + * the default. Does not work with `gpt-4o-mini-tts`. */ speed?: number; } diff --git a/src/resources/images.ts b/src/resources/images.ts index de1882d30..32f1e123c 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -162,10 +162,13 @@ export interface ImageCreateVariationParams { export interface ImageEditParams { /** - * The image(s) to edit. Must be a supported image file or an array of images. For - * `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - * 25MB. For `dall-e-2`, you can only provide one image, and it should be a square - * `png` file less than 4MB. + * The image(s) to edit. Must be a supported image file or an array of images. + * + * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + * 25MB. You can provide up to 16 images. + * + * For `dall-e-2`, you can only provide one image, and it should be a square `png` + * file less than 4MB. */ image: Core.Uploadable | Array; @@ -175,6 +178,17 @@ export interface ImageEditParams { */ prompt: string; + /** + * Allows to set transparency for the background of the generated image(s). This + * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + * `opaque` or `auto` (default value). When `auto` is used, the model will + * automatically determine the best background for the image. + * + * If `transparent`, the output format needs to support transparency, so it should + * be set to either `png` (default value) or `webp`. + */ + background?: 'transparent' | 'opaque' | 'auto' | null; + /** * An additional image whose fully transparent areas (e.g. 
where alpha is zero) * indicate where `image` should be edited. If there are multiple images provided, @@ -215,7 +229,7 @@ export interface ImageEditParams { * (landscape), `1024x1536` (portrait), or `auto` (default value) for * `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. */ - size?: '256x256' | '512x512' | '1024x1024' | null; + size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto' | null; /** * A unique identifier representing your end-user, which can help OpenAI to monitor diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 771b8daf2..0a6e3666d 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -158,7 +158,7 @@ export interface ComputerTool { /** * The type of computer environment to control. */ - environment: 'mac' | 'windows' | 'ubuntu' | 'browser'; + environment: 'windows' | 'mac' | 'linux' | 'ubuntu' | 'browser'; /** * The type of the computer use tool. Always `computer_use_preview`. @@ -209,9 +209,9 @@ export interface FileSearchTool { vector_store_ids: Array; /** - * A filter to apply based on file attributes. + * A filter to apply. */ - filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + filters?: Shared.ComparisonFilter | Shared.CompoundFilter | null; /** * The maximum number of results to return. This number should be between 1 and 50 @@ -258,12 +258,12 @@ export interface FunctionTool { /** * A JSON schema object describing the parameters of the function. */ - parameters: Record; + parameters: Record | null; /** * Whether to enforce strict parameter validation. Default `true`. */ - strict: boolean; + strict: boolean | null; /** * The type of the function tool. Always `function`. @@ -1581,11 +1581,17 @@ export interface ResponseInProgressEvent { * - `message.input_image.image_url`: Include image urls from the input message. 
* - `computer_call_output.output.image_url`: Include image urls from the computer * call output. + * - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + * tokens in reasoning item outputs. This enables reasoning items to be used in + * multi-turn conversations when using the Responses API statelessly (like when + * the `store` parameter is set to `false`, or when an organization is enrolled + * in the zero data retention program). */ export type ResponseIncludable = | 'file_search_call.results' | 'message.input_image.image_url' - | 'computer_call_output.output.image_url'; + | 'computer_call_output.output.image_url' + | 'reasoning.encrypted_content'; /** * An event that is emitted when a response finishes as incomplete. @@ -1650,7 +1656,7 @@ export interface ResponseInputFile { /** * The ID of the file to be sent to the model. */ - file_id?: string; + file_id?: string | null; /** * The name of the file to be sent to the model. @@ -1667,7 +1673,7 @@ export interface ResponseInputImage { * The detail level of the image to be sent to the model. One of `high`, `low`, or * `auto`. Defaults to `auto`. */ - detail: 'high' | 'low' | 'auto'; + detail: 'low' | 'high' | 'auto'; /** * The type of the input item. Always `input_image`. @@ -1758,19 +1764,19 @@ export namespace ResponseInputItem { /** * The ID of the computer tool call output. */ - id?: string; + id?: string | null; /** * The safety checks reported by the API that have been acknowledged by the * developer. */ - acknowledged_safety_checks?: Array; + acknowledged_safety_checks?: Array | null; /** * The status of the message input. One of `in_progress`, `completed`, or * `incomplete`. Populated when input items are returned via API. */ - status?: 'in_progress' | 'completed' | 'incomplete'; + status?: 'in_progress' | 'completed' | 'incomplete' | null; } export namespace ComputerCallOutput { @@ -1786,12 +1792,12 @@ export namespace ResponseInputItem { /** * The type of the pending safety check. 
*/ - code: string; + code?: string | null; /** * Details about the pending safety check. */ - message: string; + message?: string | null; } } @@ -1818,13 +1824,13 @@ export namespace ResponseInputItem { * The unique ID of the function tool call output. Populated when this item is * returned via API. */ - id?: string; + id?: string | null; /** * The status of the item. One of `in_progress`, `completed`, or `incomplete`. * Populated when items are returned via API. */ - status?: 'in_progress' | 'completed' | 'incomplete'; + status?: 'in_progress' | 'completed' | 'incomplete' | null; } /** @@ -1839,7 +1845,7 @@ export namespace ResponseInputItem { /** * The type of item to reference. Always `item_reference`. */ - type: 'item_reference'; + type?: 'item_reference' | null; } } @@ -2119,7 +2125,9 @@ export namespace ResponseOutputText { /** * A description of the chain of thought used by a reasoning model while generating - * a response. + * a response. Be sure to include these items in your `input` to the Responses API + * for subsequent turns of a conversation if you are manually + * [managing context](https://platform.openai.com/docs/guides/conversation-state). */ export interface ResponseReasoningItem { /** @@ -2137,6 +2145,12 @@ export interface ResponseReasoningItem { */ type: 'reasoning'; + /** + * The encrypted content of the reasoning item - populated when a response is + * generated with `reasoning.encrypted_content` in the `include` parameter. + */ + encrypted_content?: string | null; + /** * The status of the item. One of `in_progress`, `completed`, or `incomplete`. * Populated when items are returned via API. @@ -2730,11 +2744,9 @@ export interface ResponseWebSearchCallSearchingEvent { } /** - * A tool that searches for relevant content from uploaded files. Learn more about - * the - * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + * A tool that can be used to generate a response. 
*/ -export type Tool = FileSearchTool | FunctionTool | ComputerTool | WebSearchTool; +export type Tool = FileSearchTool | FunctionTool | WebSearchTool | ComputerTool; /** * Use this option to force the model to call a specific function. @@ -2788,10 +2800,8 @@ export interface ToolChoiceTypes { */ export interface WebSearchTool { /** - * The type of the web search tool. One of: - * - * - `web_search_preview` - * - `web_search_preview_2025_03_11` + * The type of the web search tool. One of `web_search_preview` or + * `web_search_preview_2025_03_11`. */ type: 'web_search_preview' | 'web_search_preview_2025_03_11'; @@ -2801,10 +2811,16 @@ export interface WebSearchTool { */ search_context_size?: 'low' | 'medium' | 'high'; + /** + * The user's location. + */ user_location?: WebSearchTool.UserLocation | null; } export namespace WebSearchTool { + /** + * The user's location. + */ export interface UserLocation { /** * The type of location approximation. Always `approximate`. @@ -2814,24 +2830,24 @@ export namespace WebSearchTool { /** * Free text input for the city of the user, e.g. `San Francisco`. */ - city?: string; + city?: string | null; /** * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of * the user, e.g. `US`. */ - country?: string; + country?: string | null; /** * Free text input for the region of the user, e.g. `California`. */ - region?: string; + region?: string | null; /** * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the * user, e.g. `America/Los_Angeles`. */ - timezone?: string; + timezone?: string | null; } } @@ -2869,6 +2885,11 @@ export interface ResponseCreateParamsBase { * - `message.input_image.image_url`: Include image urls from the input message. * - `computer_call_output.output.image_url`: Include image urls from the computer * call output. + * - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + * tokens in reasoning item outputs. 
This enables reasoning items to be used in + * multi-turn conversations when using the Responses API statelessly (like when + * the `store` parameter is set to `false`, or when an organization is enrolled + * in the zero data retention program). */ include?: Array | null; diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index e9b460254..04fca0a2a 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -51,6 +51,7 @@ describe('resource images', () => { const response = await client.images.edit({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), prompt: 'A cute baby sea otter wearing a beret', + background: 'transparent', mask: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'string', n: 1, From 5bb454391f34c6c0d9e8b3b22d0e407c31641bfa Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 19:10:25 +0000 Subject: [PATCH 521/533] release: 4.97.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 88f780d30..4e19f03d6 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.96.2" + ".": "4.97.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index d724d8922..6fa637742 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.97.0 (2025-05-02) + +Full Changelog: [v4.96.2...v4.97.0](https://github.com/openai/openai-node/compare/v4.96.2...v4.97.0) + +### Features + +* **api:** add image sizes, reasoning encryption ([9c2113a](https://github.com/openai/openai-node/commit/9c2113af7c7ea9a797a0e39d07d9ad8627c96acb)) + + +### Chores + +* **docs:** add missing deprecation warnings 
([253392c](https://github.com/openai/openai-node/commit/253392c93adca88e0ee83f784183b2128ff64a16)) + + +### Documentation + +* fix "procesing" -> "processing" in realtime examples ([#1406](https://github.com/openai/openai-node/issues/1406)) ([8717b9f](https://github.com/openai/openai-node/commit/8717b9fce87d03e51d40ee58f5d6259408405e1f)) +* **readme:** fix typo ([cab3478](https://github.com/openai/openai-node/commit/cab3478f195f9de5c21033a1b3684f52ad347ffc)) + ## 4.96.2 (2025-04-29) Full Changelog: [v4.96.1...v4.96.2](https://github.com/openai/openai-node/compare/v4.96.1...v4.96.2) diff --git a/jsr.json b/jsr.json index 8eca06e74..fd3ca4a41 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.96.2", + "version": "4.97.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index d563394c0..8b9281b35 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.96.2", + "version": "4.97.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 1674d74fe..97cbc5900 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.96.2'; // x-release-please-version +export const VERSION = '4.97.0'; // x-release-please-version From 6469d5323b653f19e90a7470d81c914c640c6f8b Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 5 May 2025 09:32:07 +0100 Subject: [PATCH 522/533] chore(internal): fix formatting --- examples/azure/realtime/websocket.ts | 2 +- examples/azure/realtime/ws.ts | 2 +- examples/realtime/websocket.ts | 2 +- examples/realtime/ws.ts | 2 +- examples/tsconfig.json | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/azure/realtime/websocket.ts b/examples/azure/realtime/websocket.ts index 4175b4a71..91fe3b7b9 100644 --- a/examples/azure/realtime/websocket.ts 
+++ b/examples/azure/realtime/websocket.ts @@ -57,4 +57,4 @@ async function main() { rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); } -main(); \ No newline at end of file +main(); diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts index e86a79092..8b22aeef0 100644 --- a/examples/azure/realtime/ws.ts +++ b/examples/azure/realtime/ws.ts @@ -57,4 +57,4 @@ async function main() { rt.socket.on('close', () => console.log('\nConnection closed!')); } -main(); \ No newline at end of file +main(); diff --git a/examples/realtime/websocket.ts b/examples/realtime/websocket.ts index f1c46dd41..6fb4740af 100644 --- a/examples/realtime/websocket.ts +++ b/examples/realtime/websocket.ts @@ -45,4 +45,4 @@ async function main() { rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); } -main(); \ No newline at end of file +main(); diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts index 1ce6b2045..6cc950b76 100644 --- a/examples/realtime/ws.ts +++ b/examples/realtime/ws.ts @@ -45,4 +45,4 @@ async function main() { rt.socket.on('close', () => console.log('\nConnection closed!')); } -main(); \ No newline at end of file +main(); diff --git a/examples/tsconfig.json b/examples/tsconfig.json index 6c3477462..3c43903cf 100644 --- a/examples/tsconfig.json +++ b/examples/tsconfig.json @@ -1,3 +1,3 @@ { - "extends": "../tsconfig.json" + "extends": "../tsconfig.json" } From bbf5d45259a8bfba62e2217955597ec0f6cfead4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 May 2025 19:19:08 +0000 Subject: [PATCH 523/533] chore(ci): bump node version for release workflows --- .github/workflows/ci.yml | 4 ++-- .github/workflows/publish-jsr.yml | 2 +- .github/workflows/publish-npm.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 49a043930..09f1636b6 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -72,7 +72,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v4 with: - node-version: '18' + node-version: '20' - name: Bootstrap run: ./scripts/bootstrap @@ -92,7 +92,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v4 with: - node-version: '18' + node-version: '20' - name: Install dependencies run: | yarn install diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml index 1e46d6bfb..e74673c1f 100644 --- a/.github/workflows/publish-jsr.yml +++ b/.github/workflows/publish-jsr.yml @@ -19,7 +19,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v3 with: - node-version: '18' + node-version: '20' - name: Install dependencies run: | diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 5a3711b53..0662a79c5 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v3 with: - node-version: '18' + node-version: '20' - name: Install dependencies run: | From e8d2092e51015b05fe7ef33ef5a9d7652846b137 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 May 2025 22:24:41 +0000 Subject: [PATCH 524/533] docs: add examples to tsdocs --- src/resources/audio/speech.ts | 12 +++++ src/resources/audio/transcriptions.ts | 9 ++++ src/resources/audio/translations.ts | 8 +++ src/resources/beta/assistants.ts | 36 +++++++++++++ src/resources/beta/realtime/sessions.ts | 6 +++ .../beta/realtime/transcription-sessions.ts | 6 +++ src/resources/beta/threads/messages.ts | 43 +++++++++++++++ src/resources/beta/threads/runs/runs.ts | 52 +++++++++++++++++++ src/resources/beta/threads/runs/steps.ts | 21 ++++++++ src/resources/beta/threads/threads.ts | 33 ++++++++++++ src/resources/chat/completions/completions.ts | 38 ++++++++++++++ src/resources/chat/completions/messages.ts | 10 ++++ 
src/resources/completions.ts | 8 +++ src/resources/embeddings.ts | 9 ++++ .../fine-tuning/checkpoints/permissions.ts | 28 ++++++++++ src/resources/fine-tuning/jobs/checkpoints.ts | 10 ++++ src/resources/fine-tuning/jobs/jobs.ts | 40 ++++++++++++++ src/resources/images.ts | 22 ++++++++ src/resources/responses/input-items.ts | 10 ++++ src/resources/responses/responses.ts | 22 ++++++++ 20 files changed, 423 insertions(+) diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index e218c8299..ccd37c092 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -7,6 +7,18 @@ import { type Response } from '../../_shims/index'; export class Speech extends APIResource { /** * Generates audio from the input text. + * + * @example + * ```ts + * const speech = await client.audio.speech.create({ + * input: 'input', + * model: 'string', + * voice: 'ash', + * }); + * + * const content = await speech.blob(); + * console.log(content); + * ``` */ create(body: SpeechCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/audio/speech', { diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index ba4fec6c5..8d563e0ba 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -9,6 +9,15 @@ import { Stream } from '../../streaming'; export class Transcriptions extends APIResource { /** * Transcribes audio into the input language. 
+ * + * @example + * ```ts + * const transcription = + * await client.audio.transcriptions.create({ + * file: fs.createReadStream('speech.mp3'), + * model: 'gpt-4o-transcribe', + * }); + * ``` */ create( body: TranscriptionCreateParamsNonStreaming<'json' | undefined>, diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index df312f876..1edb71a7d 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -8,6 +8,14 @@ import * as TranscriptionsAPI from './transcriptions'; export class Translations extends APIResource { /** * Translates audio into English. + * + * @example + * ```ts + * const translation = await client.audio.translations.create({ + * file: fs.createReadStream('speech.mp3'), + * model: 'whisper-1', + * }); + * ``` */ create( body: TranslationCreateParams<'json' | undefined>, diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 00a6ff2cf..95581bbc8 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -14,6 +14,13 @@ import { AssistantStream } from '../../lib/AssistantStream'; export class Assistants extends APIResource { /** * Create an assistant with a model and instructions. + * + * @example + * ```ts + * const assistant = await client.beta.assistants.create({ + * model: 'gpt-4o', + * }); + * ``` */ create(body: AssistantCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/assistants', { @@ -25,6 +32,13 @@ export class Assistants extends APIResource { /** * Retrieves an assistant. + * + * @example + * ```ts + * const assistant = await client.beta.assistants.retrieve( + * 'assistant_id', + * ); + * ``` */ retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/assistants/${assistantId}`, { @@ -35,6 +49,13 @@ export class Assistants extends APIResource { /** * Modifies an assistant. 
+ * + * @example + * ```ts + * const assistant = await client.beta.assistants.update( + * 'assistant_id', + * ); + * ``` */ update( assistantId: string, @@ -50,6 +71,14 @@ export class Assistants extends APIResource { /** * Returns a list of assistants. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const assistant of client.beta.assistants.list()) { + * // ... + * } + * ``` */ list( query?: AssistantListParams, @@ -72,6 +101,13 @@ export class Assistants extends APIResource { /** * Delete an assistant. + * + * @example + * ```ts + * const assistantDeleted = await client.beta.assistants.del( + * 'assistant_id', + * ); + * ``` */ del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/assistants/${assistantId}`, { diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts index 28a44431e..a55a2678c 100644 --- a/src/resources/beta/realtime/sessions.ts +++ b/src/resources/beta/realtime/sessions.ts @@ -12,6 +12,12 @@ export class Sessions extends APIResource { * It responds with a session object, plus a `client_secret` key which contains a * usable ephemeral API token that can be used to authenticate browser clients for * the Realtime API. 
+ * + * @example + * ```ts + * const session = + * await client.beta.realtime.sessions.create(); + * ``` */ create(body: SessionCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/realtime/sessions', { diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts index a54ec1125..61e58a8e8 100644 --- a/src/resources/beta/realtime/transcription-sessions.ts +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -12,6 +12,12 @@ export class TranscriptionSessions extends APIResource { * It responds with a session object, plus a `client_secret` key which contains a * usable ephemeral API token that can be used to authenticate browser clients for * the Realtime API. + * + * @example + * ```ts + * const transcriptionSession = + * await client.beta.realtime.transcriptionSessions.create(); + * ``` */ create( body: TranscriptionSessionCreateParams, diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 29fd2b29f..c3834ebe6 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -10,6 +10,14 @@ import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Messages extends APIResource { /** * Create a message. + * + * @example + * ```ts + * const message = await client.beta.threads.messages.create( + * 'thread_id', + * { content: 'string', role: 'user' }, + * ); + * ``` */ create( threadId: string, @@ -25,6 +33,14 @@ export class Messages extends APIResource { /** * Retrieve a message. 
+ * + * @example + * ```ts + * const message = await client.beta.threads.messages.retrieve( + * 'thread_id', + * 'message_id', + * ); + * ``` */ retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}/messages/${messageId}`, { @@ -35,6 +51,14 @@ export class Messages extends APIResource { /** * Modifies a message. + * + * @example + * ```ts + * const message = await client.beta.threads.messages.update( + * 'thread_id', + * 'message_id', + * ); + * ``` */ update( threadId: string, @@ -51,6 +75,16 @@ export class Messages extends APIResource { /** * Returns a list of messages for a given thread. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const message of client.beta.threads.messages.list( + * 'thread_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string, @@ -75,6 +109,15 @@ export class Messages extends APIResource { /** * Deletes a message. + * + * @example + * ```ts + * const messageDeleted = + * await client.beta.threads.messages.del( + * 'thread_id', + * 'message_id', + * ); + * ``` */ del(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/threads/${threadId}/messages/${messageId}`, { diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 15bfb4204..25356df3c 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -45,6 +45,14 @@ export class Runs extends APIResource { /** * Create a run. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.create( + * 'thread_id', + * { assistant_id: 'assistant_id' }, + * ); + * ``` */ create( threadId: string, @@ -78,6 +86,14 @@ export class Runs extends APIResource { /** * Retrieves a run. 
+ * + * @example + * ```ts + * const run = await client.beta.threads.runs.retrieve( + * 'thread_id', + * 'run_id', + * ); + * ``` */ retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}/runs/${runId}`, { @@ -88,6 +104,14 @@ export class Runs extends APIResource { /** * Modifies a run. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.update( + * 'thread_id', + * 'run_id', + * ); + * ``` */ update( threadId: string, @@ -104,6 +128,16 @@ export class Runs extends APIResource { /** * Returns a list of runs belonging to a thread. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const run of client.beta.threads.runs.list( + * 'thread_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string, @@ -128,6 +162,14 @@ export class Runs extends APIResource { /** * Cancels a run that is `in_progress`. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.cancel( + * 'thread_id', + * 'run_id', + * ); + * ``` */ cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.post(`/threads/${threadId}/runs/${runId}/cancel`, { @@ -229,6 +271,16 @@ export class Runs extends APIResource { * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the * tool calls once they're all completed. All outputs must be submitted in a single * request. 
+ * + * @example + * ```ts + * const run = + * await client.beta.threads.runs.submitToolOutputs( + * 'thread_id', + * 'run_id', + * { tool_outputs: [{}] }, + * ); + * ``` */ submitToolOutputs( threadId: string, diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index c491b4e83..abd8d40ed 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -10,6 +10,16 @@ import { CursorPage, type CursorPageParams } from '../../../../pagination'; export class Steps extends APIResource { /** * Retrieves a run step. + * + * @example + * ```ts + * const runStep = + * await client.beta.threads.runs.steps.retrieve( + * 'thread_id', + * 'run_id', + * 'step_id', + * ); + * ``` */ retrieve( threadId: string, @@ -43,6 +53,17 @@ export class Steps extends APIResource { /** * Returns a list of run steps belonging to a run. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const runStep of client.beta.threads.runs.steps.list( + * 'thread_id', + * 'run_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string, diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 2e5ab1cc8..c0c6bc8e4 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -73,6 +73,11 @@ export class Threads extends APIResource { /** * Create a thread. + * + * @example + * ```ts + * const thread = await client.beta.threads.create(); + * ``` */ create(body?: ThreadCreateParams, options?: Core.RequestOptions): Core.APIPromise; create(options?: Core.RequestOptions): Core.APIPromise; @@ -92,6 +97,13 @@ export class Threads extends APIResource { /** * Retrieves a thread. 
+ * + * @example + * ```ts + * const thread = await client.beta.threads.retrieve( + * 'thread_id', + * ); + * ``` */ retrieve(threadId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/threads/${threadId}`, { @@ -102,6 +114,13 @@ export class Threads extends APIResource { /** * Modifies a thread. + * + * @example + * ```ts + * const thread = await client.beta.threads.update( + * 'thread_id', + * ); + * ``` */ update(threadId: string, body: ThreadUpdateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post(`/threads/${threadId}`, { @@ -113,6 +132,13 @@ export class Threads extends APIResource { /** * Delete a thread. + * + * @example + * ```ts + * const threadDeleted = await client.beta.threads.del( + * 'thread_id', + * ); + * ``` */ del(threadId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/threads/${threadId}`, { @@ -123,6 +149,13 @@ export class Threads extends APIResource { /** * Create a thread and run it in one request. + * + * @example + * ```ts + * const run = await client.beta.threads.createAndRun({ + * assistant_id: 'assistant_id', + * }); + * ``` */ createAndRun( body: ThreadCreateAndRunParamsNonStreaming, diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 251020337..6481f8e0f 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -33,6 +33,16 @@ export class Completions extends APIResource { * supported for reasoning models are noted below. For the current state of * unsupported parameters in reasoning models, * [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). 
+ * + * @example + * ```ts + * const chatCompletion = await client.chat.completions.create( + * { + * messages: [{ content: 'string', role: 'developer' }], + * model: 'gpt-4o', + * }, + * ); + * ``` */ create( body: ChatCompletionCreateParamsNonStreaming, @@ -58,6 +68,12 @@ export class Completions extends APIResource { /** * Get a stored chat completion. Only Chat Completions that have been created with * the `store` parameter set to `true` will be returned. + * + * @example + * ```ts + * const chatCompletion = + * await client.chat.completions.retrieve('completion_id'); + * ``` */ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/chat/completions/${completionId}`, options); @@ -67,6 +83,14 @@ export class Completions extends APIResource { * Modify a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be modified. Currently, the only * supported modification is to update the `metadata` field. + * + * @example + * ```ts + * const chatCompletion = await client.chat.completions.update( + * 'completion_id', + * { metadata: { foo: 'string' } }, + * ); + * ``` */ update( completionId: string, @@ -79,6 +103,14 @@ export class Completions extends APIResource { /** * List stored Chat Completions. Only Chat Completions that have been stored with * the `store` parameter set to `true` will be returned. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const chatCompletion of client.chat.completions.list()) { + * // ... + * } + * ``` */ list( query?: ChatCompletionListParams, @@ -98,6 +130,12 @@ export class Completions extends APIResource { /** * Delete a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be deleted. 
+ * + * @example + * ```ts + * const chatCompletionDeleted = + * await client.chat.completions.del('completion_id'); + * ``` */ del(completionId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/chat/completions/${completionId}`, options); diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts index 519a33aff..ab3eb73f6 100644 --- a/src/resources/chat/completions/messages.ts +++ b/src/resources/chat/completions/messages.ts @@ -11,6 +11,16 @@ export class Messages extends APIResource { /** * Get the messages in a stored chat completion. Only Chat Completions that have * been created with the `store` parameter set to `true` will be returned. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const chatCompletionStoreMessage of client.chat.completions.messages.list( + * 'completion_id', + * )) { + * // ... + * } + * ``` */ list( completionId: string, diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 5cbec5e3c..07cb49ed9 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -10,6 +10,14 @@ import { Stream } from '../streaming'; export class Completions extends APIResource { /** * Creates a completion for the provided prompt and parameters. + * + * @example + * ```ts + * const completion = await client.completions.create({ + * model: 'string', + * prompt: 'This is a test.', + * }); + * ``` */ create(body: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; create( diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index a4be9ca3c..cc040abff 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -6,6 +6,15 @@ import * as Core from '../core'; export class Embeddings extends APIResource { /** * Creates an embedding vector representing the input text. 
+ * + * @example + * ```ts + * const createEmbeddingResponse = + * await client.embeddings.create({ + * input: 'The quick brown fox jumped over the lazy dog', + * model: 'text-embedding-3-small', + * }); + * ``` */ create( body: EmbeddingCreateParams, diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts index e808b2001..dc25bab7f 100644 --- a/src/resources/fine-tuning/checkpoints/permissions.ts +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -11,6 +11,17 @@ export class Permissions extends APIResource { * * This enables organization owners to share fine-tuned models with other projects * in their organization. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const permissionCreateResponse of client.fineTuning.checkpoints.permissions.create( + * 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + * { project_ids: ['string'] }, + * )) { + * // ... + * } + * ``` */ create( fineTunedModelCheckpoint: string, @@ -29,6 +40,14 @@ export class Permissions extends APIResource { * * Organization owners can use this endpoint to view all permissions for a * fine-tuned model checkpoint. + * + * @example + * ```ts + * const permission = + * await client.fineTuning.checkpoints.permissions.retrieve( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` */ retrieve( fineTunedModelCheckpoint: string, @@ -58,6 +77,15 @@ export class Permissions extends APIResource { * * Organization owners can use this endpoint to delete a permission for a * fine-tuned model checkpoint. 
+ * + * @example + * ```ts + * const permission = + * await client.fineTuning.checkpoints.permissions.del( + * 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + * 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', + * ); + * ``` */ del( fineTunedModelCheckpoint: string, diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts index b3018ac5f..10902e715 100644 --- a/src/resources/fine-tuning/jobs/checkpoints.ts +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -8,6 +8,16 @@ import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Checkpoints extends APIResource { /** * List checkpoints for a fine-tuning job. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const fineTuningJobCheckpoint of client.fineTuning.jobs.checkpoints.list( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * )) { + * // ... + * } + * ``` */ list( fineTuningJobId: string, diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 2198e8174..0bc812917 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -23,6 +23,14 @@ export class Jobs extends APIResource { * of the fine-tuned models once complete. * * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.create({ + * model: 'gpt-4o-mini', + * training_file: 'file-abc123', + * }); + * ``` */ create(body: JobCreateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/fine_tuning/jobs', { body, ...options }); @@ -32,6 +40,13 @@ export class Jobs extends APIResource { * Get info about a fine-tuning job. 
* * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.retrieve( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` */ retrieve(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.get(`/fine_tuning/jobs/${fineTuningJobId}`, options); @@ -39,6 +54,14 @@ export class Jobs extends APIResource { /** * List your organization's fine-tuning jobs + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const fineTuningJob of client.fineTuning.jobs.list()) { + * // ... + * } + * ``` */ list( query?: JobListParams, @@ -57,6 +80,13 @@ export class Jobs extends APIResource { /** * Immediately cancel a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.cancel( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` */ cancel(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/cancel`, options); @@ -64,6 +94,16 @@ export class Jobs extends APIResource { /** * Get status updates for a fine-tuning job. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const fineTuningJobEvent of client.fineTuning.jobs.listEvents( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * )) { + * // ... + * } + * ``` */ listEvents( fineTuningJobId: string, diff --git a/src/resources/images.ts b/src/resources/images.ts index 32f1e123c..c6b14833a 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -6,6 +6,13 @@ import * as Core from '../core'; export class Images extends APIResource { /** * Creates a variation of a given image. This endpoint only supports `dall-e-2`. 
+ * + * @example + * ```ts + * const imagesResponse = await client.images.createVariation({ + * image: fs.createReadStream('otter.png'), + * }); + * ``` */ createVariation( body: ImageCreateVariationParams, @@ -17,6 +24,14 @@ export class Images extends APIResource { /** * Creates an edited or extended image given one or more source images and a * prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. + * + * @example + * ```ts + * const imagesResponse = await client.images.edit({ + * image: fs.createReadStream('path/to/file'), + * prompt: 'A cute baby sea otter wearing a beret', + * }); + * ``` */ edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/images/edits', Core.multipartFormRequestOptions({ body, ...options })); @@ -25,6 +40,13 @@ export class Images extends APIResource { /** * Creates an image given a prompt. * [Learn more](https://platform.openai.com/docs/guides/images). + * + * @example + * ```ts + * const imagesResponse = await client.images.generate({ + * prompt: 'A cute baby sea otter', + * }); + * ``` */ generate(body: ImageGenerateParams, options?: Core.RequestOptions): Core.APIPromise { return this._client.post('/images/generations', { body, ...options }); diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts index c88bb441d..74707f184 100644 --- a/src/resources/responses/input-items.ts +++ b/src/resources/responses/input-items.ts @@ -10,6 +10,16 @@ import { type CursorPageParams } from '../../pagination'; export class InputItems extends APIResource { /** * Returns a list of input items for a given response. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const responseItem of client.responses.inputItems.list( + * 'response_id', + * )) { + * // ... 
+ * } + * ``` */ list( responseId: string, diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts index 0a6e3666d..1440e865e 100644 --- a/src/resources/responses/responses.ts +++ b/src/resources/responses/responses.ts @@ -61,6 +61,14 @@ export class Responses extends APIResource { * [web search](https://platform.openai.com/docs/guides/tools-web-search) or * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use * your own data as input for the model's response. + * + * @example + * ```ts + * const response = await client.responses.create({ + * input: 'string', + * model: 'gpt-4o', + * }); + * ``` */ create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; create( @@ -90,6 +98,13 @@ export class Responses extends APIResource { /** * Retrieves a model response with the given ID. + * + * @example + * ```ts + * const response = await client.responses.retrieve( + * 'resp_677efb5139a88190b512bc3fef8e535d', + * ); + * ``` */ retrieve( responseId: string, @@ -110,6 +125,13 @@ export class Responses extends APIResource { /** * Deletes a model response with the given ID. 
+ * + * @example + * ```ts + * await client.responses.del( + * 'resp_677efb5139a88190b512bc3fef8e535d', + * ); + * ``` */ del(responseId: string, options?: Core.RequestOptions): Core.APIPromise { return this._client.delete(`/responses/${responseId}`, { From fabe6ec948c08c11588f6168f0a7560bf307d780 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 8 May 2025 17:23:50 +0000 Subject: [PATCH 525/533] feat(api): Add reinforcement fine-tuning api support --- .stats.yml | 8 +- api.md | 43 +- src/index.ts | 11 +- src/resources/evals/evals.ts | 732 ++---------------- src/resources/evals/index.ts | 3 - src/resources/fine-tuning/alpha.ts | 3 + src/resources/fine-tuning/alpha/alpha.ts | 27 + src/resources/fine-tuning/alpha/graders.ts | 168 ++++ src/resources/fine-tuning/alpha/index.ts | 10 + src/resources/fine-tuning/fine-tuning.ts | 28 + src/resources/fine-tuning/index.ts | 10 + src/resources/fine-tuning/jobs/jobs.ts | 203 +---- src/resources/fine-tuning/methods.ts | 152 ++++ src/resources/graders.ts | 3 + src/resources/graders/grader-models.ts | 296 +++++++ src/resources/graders/graders.ts | 31 + src/resources/graders/index.ts | 12 + src/resources/index.ts | 4 +- .../fine-tuning/alpha/graders.test.ts | 53 ++ .../fine-tuning/jobs/jobs.test.ts | 56 +- 20 files changed, 1019 insertions(+), 834 deletions(-) create mode 100644 src/resources/fine-tuning/alpha.ts create mode 100644 src/resources/fine-tuning/alpha/alpha.ts create mode 100644 src/resources/fine-tuning/alpha/graders.ts create mode 100644 src/resources/fine-tuning/alpha/index.ts create mode 100644 src/resources/fine-tuning/methods.ts create mode 100644 src/resources/graders.ts create mode 100644 src/resources/graders/grader-models.ts create mode 100644 src/resources/graders/graders.ts create mode 100644 src/resources/graders/index.ts create mode 100644 tests/api-resources/fine-tuning/alpha/graders.test.ts diff --git a/.stats.yml b/.stats.yml index 
0c8278866..5f1bee851 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0ee6b36cf3cc278cef4199a6aec5f7d530a6c1f17a74830037e96d50ca1edc50.yml -openapi_spec_hash: e8ec5f46bc0655b34f292422d58a60f6 -config_hash: d9b6b6e6bc85744663e300eebc482067 +configured_endpoints: 101 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-794a6ed3c3d3d77887564755168056af8a426b17cf1ec721e3a300503dc22a41.yml +openapi_spec_hash: 25a81c220713cd5b0bafc221d1dfa79a +config_hash: 0b768ed1b56c6d82816f0fa40dc4aaf5 diff --git a/api.md b/api.md index 49e6548a8..cad696e7e 100644 --- a/api.md +++ b/api.md @@ -207,6 +207,17 @@ Methods: # FineTuning +## Methods + +Types: + +- DpoHyperparameters +- DpoMethod +- ReinforcementHyperparameters +- ReinforcementMethod +- SupervisedHyperparameters +- SupervisedMethod + ## Jobs Types: @@ -224,6 +235,8 @@ Methods: - client.fineTuning.jobs.list({ ...params }) -> FineTuningJobsPage - client.fineTuning.jobs.cancel(fineTuningJobId) -> FineTuningJob - client.fineTuning.jobs.listEvents(fineTuningJobId, { ...params }) -> FineTuningJobEventsPage +- client.fineTuning.jobs.pause(fineTuningJobId) -> FineTuningJob +- client.fineTuning.jobs.resume(fineTuningJobId) -> FineTuningJob ### Checkpoints @@ -251,6 +264,33 @@ Methods: - client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse - client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint, permissionId) -> PermissionDeleteResponse +## Alpha + +### Graders + +Types: + +- GraderRunResponse +- GraderValidateResponse + +Methods: + +- client.fineTuning.alpha.graders.run({ ...params }) -> GraderRunResponse +- client.fineTuning.alpha.graders.validate({ ...params }) -> GraderValidateResponse + +# Graders + +## GraderModels + +Types: + +- LabelModelGrader +- MultiGrader +- PythonGrader 
+- ScoreModelGrader +- StringCheckGrader +- TextSimilarityGrader + # VectorStores Types: @@ -669,10 +709,7 @@ Methods: Types: - EvalCustomDataSourceConfig -- EvalLabelModelGrader - EvalStoredCompletionsDataSourceConfig -- EvalStringCheckGrader -- EvalTextSimilarityGrader - EvalCreateResponse - EvalRetrieveResponse - EvalUpdateResponse diff --git a/src/index.ts b/src/index.ts index 9e8d7ce37..537c18f43 100644 --- a/src/index.ts +++ b/src/index.ts @@ -71,19 +71,17 @@ import { EvalCreateResponse, EvalCustomDataSourceConfig, EvalDeleteResponse, - EvalLabelModelGrader, EvalListParams, EvalListResponse, EvalListResponsesPage, EvalRetrieveResponse, EvalStoredCompletionsDataSourceConfig, - EvalStringCheckGrader, - EvalTextSimilarityGrader, EvalUpdateParams, EvalUpdateResponse, Evals, } from './resources/evals/evals'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { Graders } from './resources/graders/graders'; import { Responses } from './resources/responses/responses'; import { Upload, @@ -305,6 +303,7 @@ export class OpenAI extends Core.APIClient { moderations: API.Moderations = new API.Moderations(this); models: API.Models = new API.Models(this); fineTuning: API.FineTuning = new API.FineTuning(this); + graders: API.Graders = new API.Graders(this); vectorStores: API.VectorStores = new API.VectorStores(this); beta: API.Beta = new API.Beta(this); batches: API.Batches = new API.Batches(this); @@ -366,6 +365,7 @@ OpenAI.Moderations = Moderations; OpenAI.Models = Models; OpenAI.ModelsPage = ModelsPage; OpenAI.FineTuning = FineTuning; +OpenAI.Graders = Graders; OpenAI.VectorStores = VectorStores; OpenAI.VectorStoresPage = VectorStoresPage; OpenAI.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; @@ -487,6 +487,8 @@ export declare namespace OpenAI { export { FineTuning as FineTuning }; + export { Graders as Graders }; + export { VectorStores as VectorStores, type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, @@ -531,10 
+533,7 @@ export declare namespace OpenAI { export { Evals as Evals, type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, - type EvalLabelModelGrader as EvalLabelModelGrader, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, - type EvalStringCheckGrader as EvalStringCheckGrader, - type EvalTextSimilarityGrader as EvalTextSimilarityGrader, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, type EvalUpdateResponse as EvalUpdateResponse, diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index caef7acc0..0f7166df4 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -4,6 +4,7 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; +import * as GraderModelsAPI from '../graders/grader-models'; import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { @@ -103,83 +104,6 @@ export interface EvalCustomDataSourceConfig { type: 'custom'; } -/** - * A LabelModelGrader object which uses a model to assign labels to each item in - * the evaluation. - */ -export interface EvalLabelModelGrader { - input: Array; - - /** - * The labels to assign to each item in the evaluation. - */ - labels: Array; - - /** - * The model to use for the evaluation. Must support structured outputs. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The labels that indicate a passing result. Must be a subset of labels. - */ - passing_labels: Array; - - /** - * The object type, which is always `label_model`. - */ - type: 'label_model'; -} - -export namespace EvalLabelModelGrader { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. 
Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } -} - /** * A StoredCompletionsDataSourceConfig which specifies the metadata property of * your stored completions query. This is usually metadata like `usecase=chatbot` @@ -210,83 +134,6 @@ export interface EvalStoredCompletionsDataSourceConfig { metadata?: Shared.Metadata | null; } -/** - * A StringCheckGrader object that performs a string comparison between input and - * reference using a specified operation. - */ -export interface EvalStringCheckGrader { - /** - * The input text. This may include template strings. - */ - input: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. - */ - operation: 'eq' | 'ne' | 'like' | 'ilike'; - - /** - * The reference text. This may include template strings. - */ - reference: string; - - /** - * The object type, which is always `string_check`. - */ - type: 'string_check'; -} - -/** - * A TextSimilarityGrader object which grades text based on similarity metrics. 
- */ -export interface EvalTextSimilarityGrader { - /** - * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, - * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. - */ - evaluation_metric: - | 'fuzzy_match' - | 'bleu' - | 'gleu' - | 'meteor' - | 'rouge_1' - | 'rouge_2' - | 'rouge_3' - | 'rouge_4' - | 'rouge_5' - | 'rouge_l'; - - /** - * The text being graded. - */ - input: string; - - /** - * A float score where a value greater than or equal indicates a passing grade. - */ - pass_threshold: number; - - /** - * The text being graded against. - */ - reference: string; - - /** - * The type of grader. - */ - type: 'text_similarity'; - - /** - * The name of the grader. - */ - name?: string; -} - /** * An Eval object with a data source config and testing criteria. An Eval * represents a task to be done for your LLM integration. Like: @@ -335,39 +182,29 @@ export interface EvalCreateResponse { * A list of testing criteria. */ testing_criteria: Array< - | EvalLabelModelGrader - | EvalStringCheckGrader - | EvalTextSimilarityGrader - | EvalCreateResponse.Python - | EvalCreateResponse.ScoreModel + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalCreateResponse.EvalGraderTextSimilarity + | EvalCreateResponse.EvalGraderPython + | EvalCreateResponse.EvalGraderScoreModel >; } export namespace EvalCreateResponse { /** - * A PythonGrader object that runs a python script on the input. + * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { - /** - * The name of the grader. - */ - name: string; - + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The source code of the python script. - */ - source: string; - - /** - * The object type, which is always `python`. - */ - type: 'python'; - - /** - * The image tag to use for the python script. + * The threshold for the score. 
*/ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. */ @@ -377,85 +214,11 @@ export namespace EvalCreateResponse { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. - */ - type: 'score_model'; - + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. 
- */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } } } @@ -507,39 +270,29 @@ export interface EvalRetrieveResponse { * A list of testing criteria. */ testing_criteria: Array< - | EvalLabelModelGrader - | EvalStringCheckGrader - | EvalTextSimilarityGrader - | EvalRetrieveResponse.Python - | EvalRetrieveResponse.ScoreModel + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalRetrieveResponse.EvalGraderTextSimilarity + | EvalRetrieveResponse.EvalGraderPython + | EvalRetrieveResponse.EvalGraderScoreModel >; } export namespace EvalRetrieveResponse { /** - * A PythonGrader object that runs a python script on the input. + * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The name of the grader. - */ - name: string; - - /** - * The source code of the python script. - */ - source: string; - - /** - * The object type, which is always `python`. - */ - type: 'python'; - - /** - * The image tag to use for the python script. + * The threshold for the score. */ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. */ @@ -549,85 +302,11 @@ export namespace EvalRetrieveResponse { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. 
- */ - type: 'score_model'; - + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } } } @@ -679,39 +358,29 @@ export interface EvalUpdateResponse { * A list of testing criteria. */ testing_criteria: Array< - | EvalLabelModelGrader - | EvalStringCheckGrader - | EvalTextSimilarityGrader - | EvalUpdateResponse.Python - | EvalUpdateResponse.ScoreModel + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalUpdateResponse.EvalGraderTextSimilarity + | EvalUpdateResponse.EvalGraderPython + | EvalUpdateResponse.EvalGraderScoreModel >; } export namespace EvalUpdateResponse { /** - * A PythonGrader object that runs a python script on the input. 
+ * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { - /** - * The name of the grader. - */ - name: string; - - /** - * The source code of the python script. - */ - source: string; - - /** - * The object type, which is always `python`. - */ - type: 'python'; - + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The image tag to use for the python script. + * The threshold for the score. */ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. */ @@ -721,85 +390,11 @@ export namespace EvalUpdateResponse { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. - */ - type: 'score_model'; - + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. 
- */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } } } @@ -851,39 +446,29 @@ export interface EvalListResponse { * A list of testing criteria. */ testing_criteria: Array< - | EvalLabelModelGrader - | EvalStringCheckGrader - | EvalTextSimilarityGrader - | EvalListResponse.Python - | EvalListResponse.ScoreModel + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalListResponse.EvalGraderTextSimilarity + | EvalListResponse.EvalGraderPython + | EvalListResponse.EvalGraderScoreModel >; } export namespace EvalListResponse { /** - * A PythonGrader object that runs a python script on the input. + * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { - /** - * The name of the grader. - */ - name: string; - - /** - * The source code of the python script. - */ - source: string; - + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The object type, which is always `python`. - */ - type: 'python'; - - /** - * The image tag to use for the python script. + * The threshold for the score. */ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. 
+ */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. */ @@ -893,85 +478,11 @@ export namespace EvalListResponse { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. - */ - type: 'score_model'; - + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. 
- */ - type: 'output_text'; - } - } } } @@ -987,15 +498,15 @@ export interface EvalCreateParams { /** * The configuration for the data source used for the evaluation runs. */ - data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs; + data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; /** * A list of graders for all eval runs in this group. */ testing_criteria: Array< | EvalCreateParams.LabelModel - | EvalStringCheckGrader - | EvalTextSimilarityGrader + | GraderModelsAPI.StringCheckGrader + | EvalCreateParams.TextSimilarity | EvalCreateParams.Python | EvalCreateParams.ScoreModel >; @@ -1048,14 +559,14 @@ export namespace EvalCreateParams { * completions query. This is usually metadata like `usecase=chatbot` or * `prompt-version=v2`, etc. */ - export interface Logs { + export interface StoredCompletions { /** - * The type of data source. Always `logs`. + * The type of data source. Always `stored_completions`. */ - type: 'logs'; + type: 'stored_completions'; /** - * Metadata filters for the logs data source. + * Metadata filters for the stored completions data source. */ metadata?: Record; } @@ -1154,29 +665,19 @@ export namespace EvalCreateParams { } /** - * A PythonGrader object that runs a python script on the input. + * A TextSimilarityGrader object which grades text based on similarity metrics. */ - export interface Python { + export interface TextSimilarity extends GraderModelsAPI.TextSimilarityGrader { /** - * The name of the grader. - */ - name: string; - - /** - * The source code of the python script. - */ - source: string; - - /** - * The object type, which is always `python`. - */ - type: 'python'; - - /** - * The image tag to use for the python script. + * The threshold for the score. */ - image_tag?: string; + pass_threshold: number; + } + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface Python extends GraderModelsAPI.PythonGrader { /** * The threshold for the score. 
*/ @@ -1186,85 +687,11 @@ export namespace EvalCreateParams { /** * A ScoreModelGrader object that uses a model to assign a score to the input. */ - export interface ScoreModel { - /** - * The input text. This may include template strings. - */ - input: Array; - - /** - * The model to use for the evaluation. - */ - model: string; - - /** - * The name of the grader. - */ - name: string; - - /** - * The object type, which is always `score_model`. - */ - type: 'score_model'; - + export interface ScoreModel extends GraderModelsAPI.ScoreModelGrader { /** * The threshold for the score. */ pass_threshold?: number; - - /** - * The range of the score. Defaults to `[0, 1]`. - */ - range?: Array; - - /** - * The sampling parameters for the model. - */ - sampling_params?: unknown; - } - - export namespace ScoreModel { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. 
- */ - type: 'output_text'; - } - } } } @@ -1306,10 +733,7 @@ Evals.RunListResponsesPage = RunListResponsesPage; export declare namespace Evals { export { type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, - type EvalLabelModelGrader as EvalLabelModelGrader, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, - type EvalStringCheckGrader as EvalStringCheckGrader, - type EvalTextSimilarityGrader as EvalTextSimilarityGrader, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, type EvalUpdateResponse as EvalUpdateResponse, diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts index a246fe4e7..b2627fbf3 100644 --- a/src/resources/evals/index.ts +++ b/src/resources/evals/index.ts @@ -4,10 +4,7 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, - type EvalLabelModelGrader, type EvalStoredCompletionsDataSourceConfig, - type EvalStringCheckGrader, - type EvalTextSimilarityGrader, type EvalCreateResponse, type EvalRetrieveResponse, type EvalUpdateResponse, diff --git a/src/resources/fine-tuning/alpha.ts b/src/resources/fine-tuning/alpha.ts new file mode 100644 index 000000000..446b6431e --- /dev/null +++ b/src/resources/fine-tuning/alpha.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './alpha/index'; diff --git a/src/resources/fine-tuning/alpha/alpha.ts b/src/resources/fine-tuning/alpha/alpha.ts new file mode 100644 index 000000000..77d695195 --- /dev/null +++ b/src/resources/fine-tuning/alpha/alpha.ts @@ -0,0 +1,27 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +import { APIResource } from '../../../resource'; +import * as GradersAPI from './graders'; +import { + GraderRunParams, + GraderRunResponse, + GraderValidateParams, + GraderValidateResponse, + Graders, +} from './graders'; + +export class Alpha extends APIResource { + graders: GradersAPI.Graders = new GradersAPI.Graders(this._client); +} + +Alpha.Graders = Graders; + +export declare namespace Alpha { + export { + Graders as Graders, + type GraderRunResponse as GraderRunResponse, + type GraderValidateResponse as GraderValidateResponse, + type GraderRunParams as GraderRunParams, + type GraderValidateParams as GraderValidateParams, + }; +} diff --git a/src/resources/fine-tuning/alpha/graders.ts b/src/resources/fine-tuning/alpha/graders.ts new file mode 100644 index 000000000..a9ef57f71 --- /dev/null +++ b/src/resources/fine-tuning/alpha/graders.ts @@ -0,0 +1,168 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; +import * as GraderModelsAPI from '../../graders/grader-models'; + +export class Graders extends APIResource { + /** + * Run a grader. + * + * @example + * ```ts + * const response = await client.fineTuning.alpha.graders.run({ + * grader: { + * input: 'input', + * name: 'name', + * operation: 'eq', + * reference: 'reference', + * type: 'string_check', + * }, + * model_sample: 'model_sample', + * reference_answer: 'string', + * }); + * ``` + */ + run(body: GraderRunParams, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post('/fine_tuning/alpha/graders/run', { body, ...options }); + } + + /** + * Validate a grader. 
+ * + * @example + * ```ts + * const response = + * await client.fineTuning.alpha.graders.validate({ + * grader: { + * input: 'input', + * name: 'name', + * operation: 'eq', + * reference: 'reference', + * type: 'string_check', + * }, + * }); + * ``` + */ + validate( + body: GraderValidateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post('/fine_tuning/alpha/graders/validate', { body, ...options }); + } +} + +export interface GraderRunResponse { + metadata: GraderRunResponse.Metadata; + + model_grader_token_usage_per_model: Record; + + reward: number; + + sub_rewards: Record; +} + +export namespace GraderRunResponse { + export interface Metadata { + errors: Metadata.Errors; + + execution_time: number; + + name: string; + + sampled_model_name: string | null; + + scores: Record; + + token_usage: number | null; + + type: string; + } + + export namespace Metadata { + export interface Errors { + formula_parse_error: boolean; + + invalid_variable_error: boolean; + + model_grader_parse_error: boolean; + + model_grader_refusal_error: boolean; + + model_grader_server_error: boolean; + + model_grader_server_error_details: string | null; + + other_error: boolean; + + python_grader_runtime_error: boolean; + + python_grader_runtime_error_details: string | null; + + python_grader_server_error: boolean; + + python_grader_server_error_type: string | null; + + sample_parse_error: boolean; + + truncated_observation_error: boolean; + + unresponsive_reward_error: boolean; + } + } +} + +export interface GraderValidateResponse { + /** + * The grader used for the fine-tuning job. + */ + grader?: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; +} + +export interface GraderRunParams { + /** + * The grader used for the fine-tuning job. 
+ */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; + + /** + * The model sample to be evaluated. + */ + model_sample: string; + + /** + * The reference answer for the evaluation. + */ + reference_answer: string | unknown | Array | number; +} + +export interface GraderValidateParams { + /** + * The grader used for the fine-tuning job. + */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; +} + +export declare namespace Graders { + export { + type GraderRunResponse as GraderRunResponse, + type GraderValidateResponse as GraderValidateResponse, + type GraderRunParams as GraderRunParams, + type GraderValidateParams as GraderValidateParams, + }; +} diff --git a/src/resources/fine-tuning/alpha/index.ts b/src/resources/fine-tuning/alpha/index.ts new file mode 100644 index 000000000..47b229bc3 --- /dev/null +++ b/src/resources/fine-tuning/alpha/index.ts @@ -0,0 +1,10 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Alpha } from './alpha'; +export { + Graders, + type GraderRunResponse, + type GraderValidateResponse, + type GraderRunParams, + type GraderValidateParams, +} from './graders'; diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index 9b0a01992..8fb54983b 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,6 +1,18 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
import { APIResource } from '../../resource'; +import * as MethodsAPI from './methods'; +import { + DpoHyperparameters, + DpoMethod, + Methods, + ReinforcementHyperparameters, + ReinforcementMethod, + SupervisedHyperparameters, + SupervisedMethod, +} from './methods'; +import * as AlphaAPI from './alpha/alpha'; +import { Alpha } from './alpha/alpha'; import * as CheckpointsAPI from './checkpoints/checkpoints'; import { Checkpoints } from './checkpoints/checkpoints'; import * as JobsAPI from './jobs/jobs'; @@ -19,16 +31,30 @@ import { } from './jobs/jobs'; export class FineTuning extends APIResource { + methods: MethodsAPI.Methods = new MethodsAPI.Methods(this._client); jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client); + alpha: AlphaAPI.Alpha = new AlphaAPI.Alpha(this._client); } +FineTuning.Methods = Methods; FineTuning.Jobs = Jobs; FineTuning.FineTuningJobsPage = FineTuningJobsPage; FineTuning.FineTuningJobEventsPage = FineTuningJobEventsPage; FineTuning.Checkpoints = Checkpoints; +FineTuning.Alpha = Alpha; export declare namespace FineTuning { + export { + Methods as Methods, + type DpoHyperparameters as DpoHyperparameters, + type DpoMethod as DpoMethod, + type ReinforcementHyperparameters as ReinforcementHyperparameters, + type ReinforcementMethod as ReinforcementMethod, + type SupervisedHyperparameters as SupervisedHyperparameters, + type SupervisedMethod as SupervisedMethod, + }; + export { Jobs as Jobs, type FineTuningJob as FineTuningJob, @@ -44,4 +70,6 @@ export declare namespace FineTuning { }; export { Checkpoints as Checkpoints }; + + export { Alpha as Alpha }; } diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index d23161c62..878ac402d 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -1,5 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export { Alpha } from './alpha/index'; export { Checkpoints } from './checkpoints/index'; export { FineTuning } from './fine-tuning'; export { @@ -15,3 +16,12 @@ export { type JobListParams, type JobListEventsParams, } from './jobs/index'; +export { + Methods, + type DpoHyperparameters, + type DpoMethod, + type ReinforcementHyperparameters, + type ReinforcementMethod, + type SupervisedHyperparameters, + type SupervisedMethod, +} from './methods'; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 0bc812917..08616cd4f 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -3,6 +3,7 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; +import * as MethodsAPI from '../methods'; import * as CheckpointsAPI from './checkpoints'; import { CheckpointListParams, @@ -127,6 +128,34 @@ export class Jobs extends APIResource { ...options, }); } + + /** + * Pause a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.pause( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` + */ + pause(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/pause`, options); + } + + /** + * Resume a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.resume( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` + */ + resume(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/resume`, options); + } } export class FineTuningJobsPage extends CursorPage {} @@ -293,97 +322,24 @@ export namespace FineTuningJob { */ export interface Method { /** - * Configuration for the DPO fine-tuning method. 
- */ - dpo?: Method.Dpo; - - /** - * Configuration for the supervised fine-tuning method. + * The type of method. Is either `supervised`, `dpo`, or `reinforcement`. */ - supervised?: Method.Supervised; + type: 'supervised' | 'dpo' | 'reinforcement'; /** - * The type of method. Is either `supervised` or `dpo`. + * Configuration for the DPO fine-tuning method. */ - type?: 'supervised' | 'dpo'; - } + dpo?: MethodsAPI.DpoMethod; - export namespace Method { /** - * Configuration for the DPO fine-tuning method. + * Configuration for the reinforcement fine-tuning method. */ - export interface Dpo { - /** - * The hyperparameters used for the fine-tuning job. - */ - hyperparameters?: Dpo.Hyperparameters; - } - - export namespace Dpo { - /** - * The hyperparameters used for the fine-tuning job. - */ - export interface Hyperparameters { - /** - * Number of examples in each batch. A larger batch size means that model - * parameters are updated less frequently, but with lower variance. - */ - batch_size?: 'auto' | number; - - /** - * The beta value for the DPO method. A higher beta value will increase the weight - * of the penalty between the policy and reference model. - */ - beta?: 'auto' | number; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to - * avoid overfitting. - */ - learning_rate_multiplier?: 'auto' | number; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: 'auto' | number; - } - } + reinforcement?: MethodsAPI.ReinforcementMethod; /** * Configuration for the supervised fine-tuning method. */ - export interface Supervised { - /** - * The hyperparameters used for the fine-tuning job. - */ - hyperparameters?: Supervised.Hyperparameters; - } - - export namespace Supervised { - /** - * The hyperparameters used for the fine-tuning job. - */ - export interface Hyperparameters { - /** - * Number of examples in each batch. 
A larger batch size means that model - * parameters are updated less frequently, but with lower variance. - */ - batch_size?: 'auto' | number; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to - * avoid overfitting. - */ - learning_rate_multiplier?: 'auto' | number; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: 'auto' | number; - } - } + supervised?: MethodsAPI.SupervisedMethod; } } @@ -637,97 +593,24 @@ export namespace JobCreateParams { */ export interface Method { /** - * Configuration for the DPO fine-tuning method. - */ - dpo?: Method.Dpo; - - /** - * Configuration for the supervised fine-tuning method. + * The type of method. Is either `supervised`, `dpo`, or `reinforcement`. */ - supervised?: Method.Supervised; + type: 'supervised' | 'dpo' | 'reinforcement'; /** - * The type of method. Is either `supervised` or `dpo`. + * Configuration for the DPO fine-tuning method. */ - type?: 'supervised' | 'dpo'; - } + dpo?: MethodsAPI.DpoMethod; - export namespace Method { /** - * Configuration for the DPO fine-tuning method. + * Configuration for the reinforcement fine-tuning method. */ - export interface Dpo { - /** - * The hyperparameters used for the fine-tuning job. - */ - hyperparameters?: Dpo.Hyperparameters; - } - - export namespace Dpo { - /** - * The hyperparameters used for the fine-tuning job. - */ - export interface Hyperparameters { - /** - * Number of examples in each batch. A larger batch size means that model - * parameters are updated less frequently, but with lower variance. - */ - batch_size?: 'auto' | number; - - /** - * The beta value for the DPO method. A higher beta value will increase the weight - * of the penalty between the policy and reference model. - */ - beta?: 'auto' | number; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to - * avoid overfitting. 
- */ - learning_rate_multiplier?: 'auto' | number; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: 'auto' | number; - } - } + reinforcement?: MethodsAPI.ReinforcementMethod; /** * Configuration for the supervised fine-tuning method. */ - export interface Supervised { - /** - * The hyperparameters used for the fine-tuning job. - */ - hyperparameters?: Supervised.Hyperparameters; - } - - export namespace Supervised { - /** - * The hyperparameters used for the fine-tuning job. - */ - export interface Hyperparameters { - /** - * Number of examples in each batch. A larger batch size means that model - * parameters are updated less frequently, but with lower variance. - */ - batch_size?: 'auto' | number; - - /** - * Scaling factor for the learning rate. A smaller learning rate may be useful to - * avoid overfitting. - */ - learning_rate_multiplier?: 'auto' | number; - - /** - * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. - */ - n_epochs?: 'auto' | number; - } - } + supervised?: MethodsAPI.SupervisedMethod; } } diff --git a/src/resources/fine-tuning/methods.ts b/src/resources/fine-tuning/methods.ts new file mode 100644 index 000000000..aa459c74c --- /dev/null +++ b/src/resources/fine-tuning/methods.ts @@ -0,0 +1,152 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as GraderModelsAPI from '../graders/grader-models'; + +export class Methods extends APIResource {} + +/** + * The hyperparameters used for the DPO fine-tuning job. + */ +export interface DpoHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * The beta value for the DPO method. 
A higher beta value will increase the weight + * of the penalty between the policy and reference model. + */ + beta?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; +} + +/** + * Configuration for the DPO fine-tuning method. + */ +export interface DpoMethod { + /** + * The hyperparameters used for the DPO fine-tuning job. + */ + hyperparameters?: DpoHyperparameters; +} + +/** + * The hyperparameters used for the reinforcement fine-tuning job. + */ +export interface ReinforcementHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Multiplier on amount of compute used for exploring search space during training. + */ + compute_multiplier?: 'auto' | number; + + /** + * The number of training steps between evaluation runs. + */ + eval_interval?: 'auto' | number; + + /** + * Number of evaluation samples to generate per training step. + */ + eval_samples?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + + /** + * Level of reasoning effort. + */ + reasoning_effort?: 'default' | 'low' | 'medium' | 'high'; +} + +/** + * Configuration for the reinforcement fine-tuning method. + */ +export interface ReinforcementMethod { + /** + * The grader used for the fine-tuning job. 
+ */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; + + /** + * The hyperparameters used for the reinforcement fine-tuning job. + */ + hyperparameters?: ReinforcementHyperparameters; +} + +/** + * The hyperparameters used for the fine-tuning job. + */ +export interface SupervisedHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; +} + +/** + * Configuration for the supervised fine-tuning method. + */ +export interface SupervisedMethod { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: SupervisedHyperparameters; +} + +export declare namespace Methods { + export { + type DpoHyperparameters as DpoHyperparameters, + type DpoMethod as DpoMethod, + type ReinforcementHyperparameters as ReinforcementHyperparameters, + type ReinforcementMethod as ReinforcementMethod, + type SupervisedHyperparameters as SupervisedHyperparameters, + type SupervisedMethod as SupervisedMethod, + }; +} diff --git a/src/resources/graders.ts b/src/resources/graders.ts new file mode 100644 index 000000000..2ea9aa959 --- /dev/null +++ b/src/resources/graders.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export * from './graders/index'; diff --git a/src/resources/graders/grader-models.ts b/src/resources/graders/grader-models.ts new file mode 100644 index 000000000..9ee08f75f --- /dev/null +++ b/src/resources/graders/grader-models.ts @@ -0,0 +1,296 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as ResponsesAPI from '../responses/responses'; + +export class GraderModels extends APIResource {} + +/** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. + */ +export interface LabelModelGrader { + input: Array; + + /** + * The labels to assign to each item in the evaluation. + */ + labels: Array; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. + */ + passing_labels: Array; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; +} + +export namespace LabelModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. 
+ */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A MultiGrader object combines the output of multiple graders to produce a single + * score. + */ +export interface MultiGrader { + /** + * A formula to calculate the output based on grader results. + */ + calculate_output: string; + + graders: Record< + string, + StringCheckGrader | TextSimilarityGrader | PythonGrader | ScoreModelGrader | LabelModelGrader + >; + + /** + * The name of the grader. + */ + name: string; + + /** + * The type of grader. + */ + type: 'multi'; +} + +/** + * A PythonGrader object that runs a python script on the input. + */ +export interface PythonGrader { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; +} + +/** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ +export interface ScoreModelGrader { + /** + * The input text. This may include template strings. + */ + input: Array; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; +} + +export namespace ScoreModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. 
Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A StringCheckGrader object that performs a string comparison between input and + * reference using a specified operation. + */ +export interface StringCheckGrader { + /** + * The input text. This may include template strings. + */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + */ + operation: 'eq' | 'ne' | 'like' | 'ilike'; + + /** + * The reference text. This may include template strings. + */ + reference: string; + + /** + * The object type, which is always `string_check`. + */ + type: 'string_check'; +} + +/** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ +export interface TextSimilarityGrader { + /** + * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, + * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + */ + evaluation_metric: + | 'fuzzy_match' + | 'bleu' + | 'gleu' + | 'meteor' + | 'rouge_1' + | 'rouge_2' + | 'rouge_3' + | 'rouge_4' + | 'rouge_5' + | 'rouge_l'; + + /** + * The text being graded. 
+ */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The text being graded against. + */ + reference: string; + + /** + * The type of grader. + */ + type: 'text_similarity'; +} + +export declare namespace GraderModels { + export { + type LabelModelGrader as LabelModelGrader, + type MultiGrader as MultiGrader, + type PythonGrader as PythonGrader, + type ScoreModelGrader as ScoreModelGrader, + type StringCheckGrader as StringCheckGrader, + type TextSimilarityGrader as TextSimilarityGrader, + }; +} diff --git a/src/resources/graders/graders.ts b/src/resources/graders/graders.ts new file mode 100644 index 000000000..de3297450 --- /dev/null +++ b/src/resources/graders/graders.ts @@ -0,0 +1,31 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as GraderModelsAPI from './grader-models'; +import { + GraderModels, + LabelModelGrader, + MultiGrader, + PythonGrader, + ScoreModelGrader, + StringCheckGrader, + TextSimilarityGrader, +} from './grader-models'; + +export class Graders extends APIResource { + graderModels: GraderModelsAPI.GraderModels = new GraderModelsAPI.GraderModels(this._client); +} + +Graders.GraderModels = GraderModels; + +export declare namespace Graders { + export { + GraderModels as GraderModels, + type LabelModelGrader as LabelModelGrader, + type MultiGrader as MultiGrader, + type PythonGrader as PythonGrader, + type ScoreModelGrader as ScoreModelGrader, + type StringCheckGrader as StringCheckGrader, + type TextSimilarityGrader as TextSimilarityGrader, + }; +} diff --git a/src/resources/graders/index.ts b/src/resources/graders/index.ts new file mode 100644 index 000000000..82d557a6a --- /dev/null +++ b/src/resources/graders/index.ts @@ -0,0 +1,12 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +export { + GraderModels, + type LabelModelGrader, + type MultiGrader, + type PythonGrader, + type ScoreModelGrader, + type StringCheckGrader, + type TextSimilarityGrader, +} from './grader-models'; +export { Graders } from './graders'; diff --git a/src/resources/index.ts b/src/resources/index.ts index 0d8ec9220..9d827615c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -33,10 +33,7 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, - type EvalLabelModelGrader, type EvalStoredCompletionsDataSourceConfig, - type EvalStringCheckGrader, - type EvalTextSimilarityGrader, type EvalCreateResponse, type EvalRetrieveResponse, type EvalUpdateResponse, @@ -57,6 +54,7 @@ export { type FileListParams, } from './files'; export { FineTuning } from './fine-tuning/fine-tuning'; +export { Graders } from './graders/graders'; export { Images, type Image, diff --git a/tests/api-resources/fine-tuning/alpha/graders.test.ts b/tests/api-resources/fine-tuning/alpha/graders.test.ts new file mode 100644 index 000000000..8e47a4c42 --- /dev/null +++ b/tests/api-resources/fine-tuning/alpha/graders.test.ts @@ -0,0 +1,53 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource graders', () => { + test('run: only required params', async () => { + const responsePromise = client.fineTuning.alpha.graders.run({ + grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' }, + model_sample: 'model_sample', + reference_answer: 'string', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('run: required and optional params', async () => { + const response = await client.fineTuning.alpha.graders.run({ + grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' }, + model_sample: 'model_sample', + reference_answer: 'string', + }); + }); + + test('validate: only required params', async () => { + const responsePromise = client.fineTuning.alpha.graders.validate({ + grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('validate: required and optional params', async () => { + const response = await client.fineTuning.alpha.graders.validate({ + grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' }, + }); + }); +}); diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts 
b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index 4de83a8b7..fe8c9efee 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -35,6 +35,7 @@ describe('resource jobs', () => { }, ], method: { + type: 'supervised', dpo: { hyperparameters: { batch_size: 'auto', @@ -43,10 +44,27 @@ describe('resource jobs', () => { n_epochs: 'auto', }, }, + reinforcement: { + grader: { + input: 'input', + name: 'name', + operation: 'eq', + reference: 'reference', + type: 'string_check', + }, + hyperparameters: { + batch_size: 'auto', + compute_multiplier: 'auto', + eval_interval: 'auto', + eval_samples: 'auto', + learning_rate_multiplier: 'auto', + n_epochs: 'auto', + reasoning_effort: 'default', + }, + }, supervised: { hyperparameters: { batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' }, }, - type: 'supervised', }, seed: 42, suffix: 'x', @@ -143,4 +161,40 @@ describe('resource jobs', () => { ), ).rejects.toThrow(OpenAI.NotFoundError); }); + + test('pause', async () => { + const responsePromise = client.fineTuning.jobs.pause('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('pause: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.jobs.pause('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('resume', async () => { + const responsePromise = 
client.fineTuning.jobs.resume('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('resume: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.jobs.resume('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); }); From bc9f15fc7d1f4acf625adc3603577b06d59cdc5c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 8 May 2025 17:24:55 +0000 Subject: [PATCH 526/533] release: 4.98.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 4e19f03d6..a279d9124 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.97.0" + ".": "4.98.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fa637742..2f1a39177 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 4.98.0 (2025-05-08) + +Full Changelog: [v4.97.0...v4.98.0](https://github.com/openai/openai-node/compare/v4.97.0...v4.98.0) + +### Features + +* **api:** Add reinforcement fine-tuning api support ([4aa7a79](https://github.com/openai/openai-node/commit/4aa7a7954c63caa26cc1640ace56093fe1cafa04)) + + +### Chores + +* **ci:** bump node version for release workflows 
([2961f63](https://github.com/openai/openai-node/commit/2961f63c4d5b8ae8efdf8ea6581aa83c6b0f722e)) +* **internal:** fix formatting ([91a44fe](https://github.com/openai/openai-node/commit/91a44fe11c0847dc50d48a03a8d409ac4bece37a)) + + +### Documentation + +* add examples to tsdocs ([7d841b7](https://github.com/openai/openai-node/commit/7d841b7f98eb542a398fb9de12056125e8d6cb22)) + ## 4.97.0 (2025-05-02) Full Changelog: [v4.96.2...v4.97.0](https://github.com/openai/openai-node/compare/v4.96.2...v4.97.0) diff --git a/jsr.json b/jsr.json index fd3ca4a41..25bbc9ac2 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.97.0", + "version": "4.98.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 8b9281b35..d34efceb0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.97.0", + "version": "4.98.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index 97cbc5900..f64cc03ff 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.97.0'; // x-release-please-version +export const VERSION = '4.98.0'; // x-release-please-version From ea1d56c979ad7136aa584a773904b0570ba14783 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 21:29:14 +0000 Subject: [PATCH 527/533] feat(api): responses x eval api --- .stats.yml | 6 +- api.md | 2 + src/index.ts | 2 + src/resources/audio/transcriptions.ts | 34 + src/resources/embeddings.ts | 7 +- src/resources/evals/evals.ts | 89 +- src/resources/evals/index.ts | 2 + src/resources/evals/runs/index.ts | 1 + src/resources/evals/runs/runs.ts | 1444 +++-------------- src/resources/fine-tuning/jobs/jobs.ts | 2 +- src/resources/index.ts | 1 + .../audio/transcriptions.test.ts | 1 + 
12 files changed, 375 insertions(+), 1216 deletions(-) diff --git a/.stats.yml b/.stats.yml index 5f1bee851..11ba2b010 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-794a6ed3c3d3d77887564755168056af8a426b17cf1ec721e3a300503dc22a41.yml -openapi_spec_hash: 25a81c220713cd5b0bafc221d1dfa79a -config_hash: 0b768ed1b56c6d82816f0fa40dc4aaf5 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml +openapi_spec_hash: 602e14add4bee018c6774e320ce309b8 +config_hash: 7da27f7260075e8813ddcea542fba1bf diff --git a/api.md b/api.md index cad696e7e..665dfaeed 100644 --- a/api.md +++ b/api.md @@ -709,6 +709,7 @@ Methods: Types: - EvalCustomDataSourceConfig +- EvalLogsDataSourceConfig - EvalStoredCompletionsDataSourceConfig - EvalCreateResponse - EvalRetrieveResponse @@ -730,6 +731,7 @@ Types: - CreateEvalCompletionsRunDataSource - CreateEvalJSONLRunDataSource +- CreateEvalResponsesRunDataSource - EvalAPIError - RunCreateResponse - RunRetrieveResponse diff --git a/src/index.ts b/src/index.ts index 537c18f43..b51da51c5 100644 --- a/src/index.ts +++ b/src/index.ts @@ -74,6 +74,7 @@ import { EvalListParams, EvalListResponse, EvalListResponsesPage, + EvalLogsDataSourceConfig, EvalRetrieveResponse, EvalStoredCompletionsDataSourceConfig, EvalUpdateParams, @@ -533,6 +534,7 @@ export declare namespace OpenAI { export { Evals as Evals, type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLogsDataSourceConfig as EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 8d563e0ba..9e5310874 100644 
--- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -324,6 +324,14 @@ export interface TranscriptionCreateParamsBase< */ model: (string & {}) | AudioAPI.AudioModel; + /** + * Controls how the audio is cut into chunks. When set to `"auto"`, the server + * first normalizes loudness and then uses voice activity detection (VAD) to choose + * boundaries. `server_vad` object can be provided to tweak VAD detection + * parameters manually. If unset, the audio is transcribed as a single block. + */ + chunking_strategy?: 'auto' | TranscriptionCreateParams.VadConfig | null; + /** * Additional information to include in the transcription response. `logprobs` will * return the log probabilities of the tokens in the response to understand the @@ -387,6 +395,32 @@ export interface TranscriptionCreateParamsBase< } export namespace TranscriptionCreateParams { + export interface VadConfig { + /** + * Must be set to `server_vad` to enable manual chunking using server side VAD. + */ + type: 'server_vad'; + + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). With shorter values + * the model will respond more quickly, but may jump in on short pauses from the + * user. + */ + silence_duration_ms?: number; + + /** + * Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. 
+ */ + threshold?: number; + } + export type TranscriptionCreateParamsNonStreaming = TranscriptionsAPI.TranscriptionCreateParamsNonStreaming; export type TranscriptionCreateParamsStreaming = TranscriptionsAPI.TranscriptionCreateParamsStreaming; } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index cc040abff..fb02a7654 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -130,11 +130,12 @@ export interface EmbeddingCreateParams { * Input text to embed, encoded as a string or array of tokens. To embed multiple * inputs in a single request, pass an array of strings or array of token arrays. * The input must not exceed the max input tokens for the model (8192 tokens for - * `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + * all embedding models), cannot be an empty string, and any array must be 2048 * dimensions or less. * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - * for counting tokens. Some models may also impose a limit on total number of - * tokens summed across inputs. + * for counting tokens. In addition to the per-input token limit, all embedding + * models enforce a maximum of 300,000 tokens summed across all inputs in a single + * request. */ input: string | Array | Array | Array>; diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 0f7166df4..396747af2 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -10,6 +10,7 @@ import * as RunsAPI from './runs/runs'; import { CreateEvalCompletionsRunDataSource, CreateEvalJSONLRunDataSource, + CreateEvalResponsesRunDataSource, EvalAPIError, RunCancelResponse, RunCreateParams, @@ -105,11 +106,37 @@ export interface EvalCustomDataSourceConfig { } /** - * A StoredCompletionsDataSourceConfig which specifies the metadata property of - * your stored completions query. 
This is usually metadata like `usecase=chatbot` - * or `prompt-version=v2`, etc. The schema returned by this data source config is - * used to defined what variables are available in your evals. `item` and `sample` - * are both defined when using this data source config. + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ +export interface EvalLogsDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; +} + +/** + * @deprecated Deprecated in favor of LogsDataSourceConfig. */ export interface EvalStoredCompletionsDataSourceConfig { /** @@ -119,9 +146,9 @@ export interface EvalStoredCompletionsDataSourceConfig { schema: Record; /** - * The type of data source. Always `stored_completions`. + * The type of data source. Always `stored-completions`. */ - type: 'stored_completions'; + type: 'stored-completions'; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -156,7 +183,10 @@ export interface EvalCreateResponse { /** * Configuration of data sources used in runs of the evaluation. 
*/ - data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + data_source_config: + | EvalCustomDataSourceConfig + | EvalLogsDataSourceConfig + | EvalStoredCompletionsDataSourceConfig; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -244,7 +274,10 @@ export interface EvalRetrieveResponse { /** * Configuration of data sources used in runs of the evaluation. */ - data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + data_source_config: + | EvalCustomDataSourceConfig + | EvalLogsDataSourceConfig + | EvalStoredCompletionsDataSourceConfig; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -332,7 +365,10 @@ export interface EvalUpdateResponse { /** * Configuration of data sources used in runs of the evaluation. */ - data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + data_source_config: + | EvalCustomDataSourceConfig + | EvalLogsDataSourceConfig + | EvalStoredCompletionsDataSourceConfig; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -420,7 +456,10 @@ export interface EvalListResponse { /** * Configuration of data sources used in runs of the evaluation. */ - data_source_config: EvalCustomDataSourceConfig | EvalStoredCompletionsDataSourceConfig; + data_source_config: + | EvalCustomDataSourceConfig + | EvalLogsDataSourceConfig + | EvalStoredCompletionsDataSourceConfig; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -498,7 +537,7 @@ export interface EvalCreateParams { /** * The configuration for the data source used for the evaluation runs. */ - data_source_config: EvalCreateParams.Custom | EvalCreateParams.StoredCompletions; + data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs | EvalCreateParams.StoredCompletions; /** * A list of graders for all eval runs in this group. 
@@ -555,15 +594,29 @@ export namespace EvalCreateParams { } /** - * A data source config which specifies the metadata property of your stored - * completions query. This is usually metadata like `usecase=chatbot` or - * `prompt-version=v2`, etc. + * A data source config which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + */ + export interface Logs { + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Metadata filters for the logs data source. + */ + metadata?: Record; + } + + /** + * Deprecated in favor of LogsDataSourceConfig. */ export interface StoredCompletions { /** - * The type of data source. Always `stored_completions`. + * The type of data source. Always `stored-completions`. */ - type: 'stored_completions'; + type: 'stored-completions'; /** * Metadata filters for the stored completions data source. @@ -733,6 +786,7 @@ Evals.RunListResponsesPage = RunListResponsesPage; export declare namespace Evals { export { type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalLogsDataSourceConfig as EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, @@ -749,6 +803,7 @@ export declare namespace Evals { Runs as Runs, type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts index b2627fbf3..856a4088a 100644 --- a/src/resources/evals/index.ts +++ b/src/resources/evals/index.ts @@ -4,6 +4,7 @@ export { 
EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, + type EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse, type EvalRetrieveResponse, @@ -19,6 +20,7 @@ export { Runs, type CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource, + type CreateEvalResponsesRunDataSource, type EvalAPIError, type RunCreateResponse, type RunRetrieveResponse, diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts index d0e18bff4..2e5d1a884 100644 --- a/src/resources/evals/runs/index.ts +++ b/src/resources/evals/runs/index.ts @@ -12,6 +12,7 @@ export { Runs, type CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource, + type CreateEvalResponsesRunDataSource, type EvalAPIError, type RunCreateResponse, type RunRetrieveResponse, diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index 50c07a514..9aec3a1c6 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -321,6 +321,242 @@ export namespace CreateEvalJSONLRunDataSource { } } +/** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ +export interface CreateEvalResponsesRunDataSource { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: + | CreateEvalResponsesRunDataSource.FileContent + | CreateEvalResponsesRunDataSource.FileID + | CreateEvalResponsesRunDataSource.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: CreateEvalResponsesRunDataSource.Template | CreateEvalResponsesRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; +} + +export namespace CreateEvalResponsesRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. 
+ */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. 
This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. 
+ */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } +} + /** * An object representing an error response from the Eval API. */ @@ -356,7 +592,7 @@ export interface RunCreateResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunCreateResponse.Completions; + | CreateEvalResponsesRunDataSource; /** * An object representing an error response from the Eval API. @@ -419,241 +655,7 @@ export interface RunCreateResponse { status: string; } -export namespace RunCreateResponse { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface Completions { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: Completions.FileContent | Completions.FileID | Completions.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: Completions.Template | Completions.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: Completions.SamplingParams; - } - - export namespace Completions { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. 
- */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. 
- */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. 
- */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. - */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } - +export namespace RunCreateResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -749,7 +751,7 @@ export interface RunRetrieveResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunRetrieveResponse.Completions; + | CreateEvalResponsesRunDataSource; /** * An object representing an error response from the Eval API. @@ -813,240 +815,6 @@ export interface RunRetrieveResponse { } export namespace RunRetrieveResponse { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface Completions { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: Completions.FileContent | Completions.FileID | Completions.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: Completions.Template | Completions.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: Completions.SamplingParams; - } - - export namespace Completions { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. 
- */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. - */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. 
May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. - */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. 
- */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } - export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -1142,7 +910,7 @@ export interface RunListResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunListResponse.Completions; + | CreateEvalResponsesRunDataSource; /** * An object representing an error response from the Eval API. @@ -1196,250 +964,16 @@ export interface RunListResponse { /** * Counters summarizing the outcomes of the evaluation run. - */ - result_counts: RunListResponse.ResultCounts; - - /** - * The status of the evaluation run. - */ - status: string; -} - -export namespace RunListResponse { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface Completions { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: Completions.FileContent | Completions.FileID | Completions.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: Completions.Template | Completions.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: Completions.SamplingParams; - } - - export namespace Completions { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. 
- */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. - */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. 
May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. - */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. 
- */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } + */ + result_counts: RunListResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} +export namespace RunListResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -1543,7 +1077,7 @@ export interface RunCancelResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunCancelResponse.Completions; + | CreateEvalResponsesRunDataSource; /** * An object representing an error response from the Eval API. @@ -1607,240 +1141,6 @@ export interface RunCancelResponse { } export namespace RunCancelResponse { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface Completions { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: Completions.FileContent | Completions.FileID | Completions.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: Completions.Template | Completions.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: Completions.SamplingParams; - } - - export namespace Completions { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. 
- */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. - */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. 
May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. - */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. 
- */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } - export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -1923,7 +1223,7 @@ export interface RunCreateParams { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | RunCreateParams.CreateEvalResponsesRunDataSource; + | CreateEvalResponsesRunDataSource; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1941,247 +1241,6 @@ export interface RunCreateParams { name?: string; } -export namespace RunCreateParams { - /** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ - export interface CreateEvalResponsesRunDataSource { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: - | CreateEvalResponsesRunDataSource.FileContent - | CreateEvalResponsesRunDataSource.FileID - | CreateEvalResponsesRunDataSource.Responses; - - /** - * The type of run data source. Always `completions`. - */ - type: 'completions'; - - input_messages?: - | CreateEvalResponsesRunDataSource.Template - | CreateEvalResponsesRunDataSource.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; - } - - export namespace CreateEvalResponsesRunDataSource { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. 
- */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } - - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Whether to allow parallel tool calls. This is a query parameter used to select - * responses. - */ - allow_parallel_tool_calls?: boolean | null; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional search string for instructions. This is a query parameter used to - * select responses. - */ - instructions_search?: string | null; - - /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. - */ - metadata?: unknown | null; - - /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. - */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. 
- */ - users?: Array | null; - } - - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; - - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; - } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. 
- */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. - */ - seed?: number; - - /** - * A higher temperature increases randomness in the outputs. - */ - temperature?: number; - - /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. - */ - top_p?: number; - } - } -} - export interface RunListParams extends CursorPageParams { /** * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for @@ -2204,6 +1263,7 @@ export declare namespace Runs { export { type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 08616cd4f..cc5f55e9a 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -302,7 +302,7 @@ export namespace FineTuningJob { * Number of examples in each batch. A larger batch size means that model * parameters are updated less frequently, but with lower variance. */ - batch_size?: 'auto' | number; + batch_size?: unknown | 'auto' | number | null; /** * Scaling factor for the learning rate. 
A smaller learning rate may be useful to diff --git a/src/resources/index.ts b/src/resources/index.ts index 9d827615c..74d585595 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -33,6 +33,7 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, + type EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse, type EvalRetrieveResponse, diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index 2297677b4..ad76808d0 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -27,6 +27,7 @@ describe('resource transcriptions', () => { const response = await client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), model: 'gpt-4o-transcribe', + chunking_strategy: 'auto', include: ['logprobs'], language: 'language', prompt: 'prompt', From e83286b10b20d3e4c02903739b045af5cbf71cde Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 23:47:30 +0000 Subject: [PATCH 528/533] feat(api): manual updates --- .stats.yml | 2 +- api.md | 5 + src/index.ts | 3 + src/resources/beta/beta.ts | 2 + src/resources/beta/index.ts | 1 + src/resources/beta/threads/index.ts | 1 + src/resources/beta/threads/runs/runs.ts | 44 +--- src/resources/beta/threads/threads.ts | 43 ++-- src/resources/evals/evals.ts | 49 +--- src/resources/evals/index.ts | 2 + src/resources/evals/runs/index.ts | 2 + src/resources/evals/runs/runs.ts | 233 ++++--------------- src/resources/graders/grader-models.ts | 94 +------- src/resources/index.ts | 1 + src/resources/shared.ts | 44 ++++ src/resources/vector-stores/index.ts | 1 + src/resources/vector-stores/vector-stores.ts | 75 ++---- 17 files changed, 155 insertions(+), 447 deletions(-) diff --git a/.stats.yml b/.stats.yml index 11ba2b010..202b915dc 100644 
--- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml openapi_spec_hash: 602e14add4bee018c6774e320ce309b8 -config_hash: 7da27f7260075e8813ddcea542fba1bf +config_hash: bdacc55eb995c15255ec82130eb8c3bb diff --git a/api.md b/api.md index 665dfaeed..c4170366f 100644 --- a/api.md +++ b/api.md @@ -7,6 +7,7 @@ Types: - ComparisonFilter - CompoundFilter - ErrorObject +- EvalItem - FunctionDefinition - FunctionParameters - Metadata @@ -304,6 +305,7 @@ Types: - StaticFileChunkingStrategyObjectParam - VectorStore - VectorStoreDeleted +- VectorStoreExpirationAfter - VectorStoreSearchResponse Methods: @@ -463,6 +465,7 @@ Types: - AssistantToolChoiceOption - Thread - ThreadDeleted +- TruncationObject Methods: @@ -733,6 +736,8 @@ Types: - CreateEvalJSONLRunDataSource - CreateEvalResponsesRunDataSource - EvalAPIError +- EvalJSONLFileContentSource +- EvalJSONLFileIDSource - RunCreateResponse - RunRetrieveResponse - RunListResponse diff --git a/src/index.ts b/src/index.ts index b51da51c5..c1612964a 100644 --- a/src/index.ts +++ b/src/index.ts @@ -101,6 +101,7 @@ import { VectorStore, VectorStoreCreateParams, VectorStoreDeleted, + VectorStoreExpirationAfter, VectorStoreListParams, VectorStoreSearchParams, VectorStoreSearchResponse, @@ -501,6 +502,7 @@ export declare namespace OpenAI { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreExpirationAfter as VectorStoreExpirationAfter, type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, @@ -552,6 +554,7 @@ export declare namespace OpenAI { export type ComparisonFilter = API.ComparisonFilter; export type CompoundFilter 
= API.CompoundFilter; export type ErrorObject = API.ErrorObject; + export type EvalItem = API.EvalItem; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; export type Metadata = API.Metadata; diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 6282d4593..c32159776 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -90,6 +90,7 @@ import { ThreadDeleted, ThreadUpdateParams, Threads, + TruncationObject, } from './threads/threads'; import { Chat } from './chat/chat'; @@ -188,6 +189,7 @@ export declare namespace Beta { type AssistantToolChoiceOption as AssistantToolChoiceOption, type Thread as Thread, type ThreadDeleted as ThreadDeleted, + type TruncationObject as TruncationObject, type ThreadCreateParams as ThreadCreateParams, type ThreadUpdateParams as ThreadUpdateParams, type ThreadCreateAndRunParams as ThreadCreateAndRunParams, diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index b9cef17cb..296fdba75 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -29,6 +29,7 @@ export { type AssistantToolChoiceOption, type Thread, type ThreadDeleted, + type TruncationObject, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index f67a1edde..89d00dcf6 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -63,6 +63,7 @@ export { type AssistantToolChoiceOption, type Thread, type ThreadDeleted, + type TruncationObject, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 25356df3c..608ef6481 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -564,7 +564,7 @@ export interface Run { * Controls 
for how a thread will be truncated prior to the run. Use this to * control the intial context window of the run. */ - truncation_strategy: Run.TruncationStrategy | null; + truncation_strategy: ThreadsAPI.TruncationObject | null; /** * Usage statistics related to the run. This value will be `null` if the run is not @@ -639,26 +639,6 @@ export namespace Run { } } - /** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. - */ - export interface TruncationStrategy { - /** - * The truncation strategy to use for the thread. The default is `auto`. If set to - * `last_messages`, the thread will be truncated to the n most recent messages in - * the thread. When set to `auto`, messages in the middle of the thread will be - * dropped to fit the context length of the model, `max_prompt_tokens`. - */ - type: 'auto' | 'last_messages'; - - /** - * The number of most recent messages from the thread when constructing the context - * for the run. - */ - last_messages?: number | null; - } - /** * Usage statistics related to the run. This value will be `null` if the run is not * in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -862,7 +842,7 @@ export interface RunCreateParamsBase { * Body param: Controls for how a thread will be truncated prior to the run. Use * this to control the intial context window of the run. */ - truncation_strategy?: RunCreateParams.TruncationStrategy | null; + truncation_strategy?: ThreadsAPI.TruncationObject | null; } export namespace RunCreateParams { @@ -921,26 +901,6 @@ export namespace RunCreateParams { } } - /** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. - */ - export interface TruncationStrategy { - /** - * The truncation strategy to use for the thread. The default is `auto`. If set to - * `last_messages`, the thread will be truncated to the n most recent messages in - * the thread. 
When set to `auto`, messages in the middle of the thread will be - * dropped to fit the context length of the model, `max_prompt_tokens`. - */ - type: 'auto' | 'last_messages'; - - /** - * The number of most recent messages from the thread when constructing the context - * for the run. - */ - last_messages?: number | null; - } - export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index c0c6bc8e4..8b0332fb8 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -346,6 +346,26 @@ export interface ThreadDeleted { object: 'thread.deleted'; } +/** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ +export interface TruncationObject { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; +} + export interface ThreadCreateParams { /** * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to @@ -734,7 +754,7 @@ export interface ThreadCreateAndRunParamsBase { * Controls for how a thread will be truncated prior to the run. Use this to * control the intial context window of the run. 
*/ - truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null; + truncation_strategy?: TruncationObject | null; } export namespace ThreadCreateAndRunParams { @@ -965,26 +985,6 @@ export namespace ThreadCreateAndRunParams { } } - /** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. - */ - export interface TruncationStrategy { - /** - * The truncation strategy to use for the thread. The default is `auto`. If set to - * `last_messages`, the thread will be truncated to the n most recent messages in - * the thread. When set to `auto`, messages in the middle of the thread will be - * dropped to fit the context length of the model, `max_prompt_tokens`. - */ - type: 'auto' | 'last_messages'; - - /** - * The number of most recent messages from the thread when constructing the context - * for the run. - */ - last_messages?: number | null; - } - export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; } @@ -1684,6 +1684,7 @@ export declare namespace Threads { type AssistantToolChoiceOption as AssistantToolChoiceOption, type Thread as Thread, type ThreadDeleted as ThreadDeleted, + type TruncationObject as TruncationObject, type ThreadCreateParams as ThreadCreateParams, type ThreadUpdateParams as ThreadUpdateParams, type ThreadCreateAndRunParams as ThreadCreateAndRunParams, diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 396747af2..5370c51b9 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -5,13 +5,14 @@ import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; import * as GraderModelsAPI from '../graders/grader-models'; -import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { 
CreateEvalCompletionsRunDataSource, CreateEvalJSONLRunDataSource, CreateEvalResponsesRunDataSource, EvalAPIError, + EvalJSONLFileContentSource, + EvalJSONLFileIDSource, RunCancelResponse, RunCreateParams, RunCreateResponse, @@ -633,7 +634,7 @@ export namespace EvalCreateParams { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - input: Array; + input: Array; /** * The labels to classify to each item in the evaluation. @@ -673,48 +674,6 @@ export namespace EvalCreateParams { */ role: string; } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. 
- */ - type: 'output_text'; - } - } } /** @@ -805,6 +764,8 @@ export declare namespace Evals { type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, + type EvalJSONLFileContentSource as EvalJSONLFileContentSource, + type EvalJSONLFileIDSource as EvalJSONLFileIDSource, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, type RunListResponse as RunListResponse, diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts index 856a4088a..084fc9ad6 100644 --- a/src/resources/evals/index.ts +++ b/src/resources/evals/index.ts @@ -22,6 +22,8 @@ export { type CreateEvalJSONLRunDataSource, type CreateEvalResponsesRunDataSource, type EvalAPIError, + type EvalJSONLFileContentSource, + type EvalJSONLFileIDSource, type RunCreateResponse, type RunRetrieveResponse, type RunListResponse, diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts index 2e5d1a884..8e13e67df 100644 --- a/src/resources/evals/runs/index.ts +++ b/src/resources/evals/runs/index.ts @@ -14,6 +14,8 @@ export { type CreateEvalJSONLRunDataSource, type CreateEvalResponsesRunDataSource, type EvalAPIError, + type EvalJSONLFileContentSource, + type EvalJSONLFileIDSource, type RunCreateResponse, type RunRetrieveResponse, type RunListResponse, diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index 9aec3a1c6..dec4dcb51 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -88,8 +88,8 @@ export interface CreateEvalCompletionsRunDataSource { * A StoredCompletionsRunDataSource configuration describing a set of filters */ source: - | CreateEvalCompletionsRunDataSource.FileContent - | CreateEvalCompletionsRunDataSource.FileID + | EvalJSONLFileContentSource + | EvalJSONLFileIDSource | CreateEvalCompletionsRunDataSource.StoredCompletions; /** @@ -110,38 +110,6 
@@ export interface CreateEvalCompletionsRunDataSource { } export namespace CreateEvalCompletionsRunDataSource { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } - /** * A StoredCompletionsRunDataSource configuration describing a set of filters */ @@ -187,7 +155,7 @@ export namespace CreateEvalCompletionsRunDataSource { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - template: Array; + template: Array; /** * The type of input messages. Always `template`. @@ -195,50 +163,6 @@ export namespace CreateEvalCompletionsRunDataSource { type: 'template'; } - export namespace Template { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Message { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Message.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Message { - /** - * A text output from the model. 
- */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } - } - export interface ItemReference { /** * A reference to a variable in the "item" namespace. Ie, "item.name" @@ -279,7 +203,7 @@ export namespace CreateEvalCompletionsRunDataSource { * eval */ export interface CreateEvalJSONLRunDataSource { - source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; + source: EvalJSONLFileContentSource | EvalJSONLFileIDSource; /** * The type of data source. Always `jsonl`. @@ -287,40 +211,6 @@ export interface CreateEvalJSONLRunDataSource { type: 'jsonl'; } -export namespace CreateEvalJSONLRunDataSource { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } -} - /** * A ResponsesRunDataSource object describing a model sampling configuration. */ @@ -328,10 +218,7 @@ export interface CreateEvalResponsesRunDataSource { /** * A EvalResponsesSource object describing a run data source configuration. */ - source: - | CreateEvalResponsesRunDataSource.FileContent - | CreateEvalResponsesRunDataSource.FileID - | CreateEvalResponsesRunDataSource.Responses; + source: EvalJSONLFileContentSource | EvalJSONLFileIDSource | CreateEvalResponsesRunDataSource.Responses; /** * The type of run data source. Always `responses`. 
@@ -349,38 +236,6 @@ export interface CreateEvalResponsesRunDataSource { } export namespace CreateEvalResponsesRunDataSource { - export interface FileContent { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; - } - - export namespace FileContent { - export interface Content { - item: Record; - - sample?: Record; - } - } - - export interface FileID { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; - } - /** * A EvalResponsesSource object describing a run data source configuration. */ @@ -458,7 +313,7 @@ export namespace CreateEvalResponsesRunDataSource { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - template: Array; + template: Array; /** * The type of input messages. Always `template`. @@ -478,48 +333,6 @@ export namespace CreateEvalResponsesRunDataSource { */ role: string; } - - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. 
- */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } } export interface ItemReference { @@ -572,6 +385,38 @@ export interface EvalAPIError { message: string; } +export interface EvalJSONLFileContentSource { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; +} + +export namespace EvalJSONLFileContentSource { + export interface Content { + item: Record; + + sample?: Record; + } +} + +export interface EvalJSONLFileIDSource { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; +} + /** * A schema representing an evaluation run. */ @@ -1265,6 +1110,8 @@ export declare namespace Runs { type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, + type EvalJSONLFileContentSource as EvalJSONLFileContentSource, + type EvalJSONLFileIDSource as EvalJSONLFileIDSource, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, type RunListResponse as RunListResponse, diff --git a/src/resources/graders/grader-models.ts b/src/resources/graders/grader-models.ts index 9ee08f75f..8a1a0eddd 100644 --- a/src/resources/graders/grader-models.ts +++ b/src/resources/graders/grader-models.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; -import * as ResponsesAPI from '../responses/responses'; +import * as Shared from '../shared'; export class GraderModels extends APIResource {} @@ -10,7 +10,7 @@ export class GraderModels extends APIResource {} * the evaluation. */ export interface LabelModelGrader { - input: Array; + input: Array; /** * The labels to assign to each item in the evaluation. 
@@ -38,50 +38,6 @@ export interface LabelModelGrader { type: 'label_model'; } -export namespace LabelModelGrader { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } -} - /** * A MultiGrader object combines the output of multiple graders to produce a single * score. @@ -140,7 +96,7 @@ export interface ScoreModelGrader { /** * The input text. This may include template strings. */ - input: Array; + input: Array; /** * The model to use for the evaluation. @@ -168,50 +124,6 @@ export interface ScoreModelGrader { sampling_params?: unknown; } -export namespace ScoreModelGrader { - /** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. 
- */ - export interface Input { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | Input.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; - } - - export namespace Input { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } - } -} - /** * A StringCheckGrader object that performs a string comparison between input and * reference using a specified operation. diff --git a/src/resources/index.ts b/src/resources/index.ts index 74d585595..0f21e596c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -91,6 +91,7 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, + type VectorStoreExpirationAfter, type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, diff --git a/src/resources/shared.ts b/src/resources/shared.ts index 1c0006b18..d0c2aaa49 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +import * as ResponsesAPI from './responses/responses'; + export type AllModels = | (string & {}) | ChatModel @@ -118,6 +120,48 @@ export interface ErrorObject { type: string; } +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. 
+ */ +export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; +} + +export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } +} + export interface FunctionDefinition { /** * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts index 9cbcbc0b2..c3c042387 100644 --- a/src/resources/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -30,6 +30,7 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, + type VectorStoreExpirationAfter, type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index 7d61e7fd6..66438be02 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -249,7 +249,7 @@ export interface VectorStore { /** * The expiration policy for a vector store. */ - expires_after?: VectorStore.ExpiresAfter; + expires_after?: VectorStoreExpirationAfter; /** * The Unix timestamp (in seconds) for when the vector store will expire. @@ -284,22 +284,6 @@ export namespace VectorStore { */ total: number; } - - /** - * The expiration policy for a vector store. 
- */ - export interface ExpiresAfter { - /** - * Anchor timestamp after which the expiration policy applies. Supported anchors: - * `last_active_at`. - */ - anchor: 'last_active_at'; - - /** - * The number of days after the anchor time that the vector store will expire. - */ - days: number; - } } export interface VectorStoreDeleted { @@ -310,6 +294,22 @@ export interface VectorStoreDeleted { object: 'vector_store.deleted'; } +/** + * The expiration policy for a vector store. + */ +export interface VectorStoreExpirationAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; +} + export interface VectorStoreSearchResponse { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -365,7 +365,7 @@ export interface VectorStoreCreateParams { /** * The expiration policy for a vector store. */ - expires_after?: VectorStoreCreateParams.ExpiresAfter; + expires_after?: VectorStoreExpirationAfter; /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -390,29 +390,11 @@ export interface VectorStoreCreateParams { name?: string; } -export namespace VectorStoreCreateParams { - /** - * The expiration policy for a vector store. - */ - export interface ExpiresAfter { - /** - * Anchor timestamp after which the expiration policy applies. Supported anchors: - * `last_active_at`. - */ - anchor: 'last_active_at'; - - /** - * The number of days after the anchor time that the vector store will expire. - */ - days: number; - } -} - export interface VectorStoreUpdateParams { /** * The expiration policy for a vector store. */ - expires_after?: VectorStoreUpdateParams.ExpiresAfter | null; + expires_after?: VectorStoreExpirationAfter | null; /** * Set of 16 key-value pairs that can be attached to an object. 
This can be useful @@ -430,24 +412,6 @@ export interface VectorStoreUpdateParams { name?: string | null; } -export namespace VectorStoreUpdateParams { - /** - * The expiration policy for a vector store. - */ - export interface ExpiresAfter { - /** - * Anchor timestamp after which the expiration policy applies. Supported anchors: - * `last_active_at`. - */ - anchor: 'last_active_at'; - - /** - * The number of days after the anchor time that the vector store will expire. - */ - days: number; - } -} - export interface VectorStoreListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place @@ -521,6 +485,7 @@ export declare namespace VectorStores { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreExpirationAfter as VectorStoreExpirationAfter, type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, From 8cc63d351057678d474fe1a16e3077370c83fddb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 17:11:15 +0000 Subject: [PATCH 529/533] feat(api): Updating Assistants and Evals API schemas --- .stats.yml | 6 +- api.md | 7 - src/index.ts | 5 - src/resources/beta/beta.ts | 2 - src/resources/beta/index.ts | 1 - src/resources/beta/threads/index.ts | 1 - src/resources/beta/threads/runs/runs.ts | 44 +- src/resources/beta/threads/threads.ts | 43 +- src/resources/evals/evals.ts | 210 ++- src/resources/evals/index.ts | 4 - src/resources/evals/runs/index.ts | 3 - src/resources/evals/runs/runs.ts | 1511 +++++++++++++++--- src/resources/graders/grader-models.ts | 96 +- src/resources/index.ts | 2 - src/resources/shared.ts | 45 +- src/resources/vector-stores/index.ts | 1 - 
src/resources/vector-stores/vector-stores.ts | 75 +- 17 files changed, 1680 insertions(+), 376 deletions(-) diff --git a/.stats.yml b/.stats.yml index 202b915dc..a3c5d081d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml -openapi_spec_hash: 602e14add4bee018c6774e320ce309b8 -config_hash: bdacc55eb995c15255ec82130eb8c3bb +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5fa16b9a02985ae06e41be14946a9c325dc672fb014b3c19abca65880c6990e6.yml +openapi_spec_hash: da3e669f65130043b1170048c0727890 +config_hash: d8d5fda350f6db77c784f35429741a2e diff --git a/api.md b/api.md index c4170366f..cad696e7e 100644 --- a/api.md +++ b/api.md @@ -7,7 +7,6 @@ Types: - ComparisonFilter - CompoundFilter - ErrorObject -- EvalItem - FunctionDefinition - FunctionParameters - Metadata @@ -305,7 +304,6 @@ Types: - StaticFileChunkingStrategyObjectParam - VectorStore - VectorStoreDeleted -- VectorStoreExpirationAfter - VectorStoreSearchResponse Methods: @@ -465,7 +463,6 @@ Types: - AssistantToolChoiceOption - Thread - ThreadDeleted -- TruncationObject Methods: @@ -712,7 +709,6 @@ Methods: Types: - EvalCustomDataSourceConfig -- EvalLogsDataSourceConfig - EvalStoredCompletionsDataSourceConfig - EvalCreateResponse - EvalRetrieveResponse @@ -734,10 +730,7 @@ Types: - CreateEvalCompletionsRunDataSource - CreateEvalJSONLRunDataSource -- CreateEvalResponsesRunDataSource - EvalAPIError -- EvalJSONLFileContentSource -- EvalJSONLFileIDSource - RunCreateResponse - RunRetrieveResponse - RunListResponse diff --git a/src/index.ts b/src/index.ts index c1612964a..537c18f43 100644 --- a/src/index.ts +++ b/src/index.ts @@ -74,7 +74,6 @@ import { EvalListParams, EvalListResponse, EvalListResponsesPage, - EvalLogsDataSourceConfig, EvalRetrieveResponse, 
EvalStoredCompletionsDataSourceConfig, EvalUpdateParams, @@ -101,7 +100,6 @@ import { VectorStore, VectorStoreCreateParams, VectorStoreDeleted, - VectorStoreExpirationAfter, VectorStoreListParams, VectorStoreSearchParams, VectorStoreSearchResponse, @@ -502,7 +500,6 @@ export declare namespace OpenAI { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, - type VectorStoreExpirationAfter as VectorStoreExpirationAfter, type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, @@ -536,7 +533,6 @@ export declare namespace OpenAI { export { Evals as Evals, type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, - type EvalLogsDataSourceConfig as EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, @@ -554,7 +550,6 @@ export declare namespace OpenAI { export type ComparisonFilter = API.ComparisonFilter; export type CompoundFilter = API.CompoundFilter; export type ErrorObject = API.ErrorObject; - export type EvalItem = API.EvalItem; export type FunctionDefinition = API.FunctionDefinition; export type FunctionParameters = API.FunctionParameters; export type Metadata = API.Metadata; diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index c32159776..6282d4593 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -90,7 +90,6 @@ import { ThreadDeleted, ThreadUpdateParams, Threads, - TruncationObject, } from './threads/threads'; import { Chat } from './chat/chat'; @@ -189,7 +188,6 @@ export declare namespace Beta { type AssistantToolChoiceOption as AssistantToolChoiceOption, type Thread as Thread, type ThreadDeleted as ThreadDeleted, - type TruncationObject as 
TruncationObject, type ThreadCreateParams as ThreadCreateParams, type ThreadUpdateParams as ThreadUpdateParams, type ThreadCreateAndRunParams as ThreadCreateAndRunParams, diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 296fdba75..b9cef17cb 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -29,7 +29,6 @@ export { type AssistantToolChoiceOption, type Thread, type ThreadDeleted, - type TruncationObject, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 89d00dcf6..f67a1edde 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -63,7 +63,6 @@ export { type AssistantToolChoiceOption, type Thread, type ThreadDeleted, - type TruncationObject, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 608ef6481..25356df3c 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -564,7 +564,7 @@ export interface Run { * Controls for how a thread will be truncated prior to the run. Use this to * control the intial context window of the run. */ - truncation_strategy: ThreadsAPI.TruncationObject | null; + truncation_strategy: Run.TruncationStrategy | null; /** * Usage statistics related to the run. This value will be `null` if the run is not @@ -639,6 +639,26 @@ export namespace Run { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. 
When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } + /** * Usage statistics related to the run. This value will be `null` if the run is not * in a terminal state (i.e. `in_progress`, `queued`, etc.). @@ -842,7 +862,7 @@ export interface RunCreateParamsBase { * Body param: Controls for how a thread will be truncated prior to the run. Use * this to control the intial context window of the run. */ - truncation_strategy?: ThreadsAPI.TruncationObject | null; + truncation_strategy?: RunCreateParams.TruncationStrategy | null; } export namespace RunCreateParams { @@ -901,6 +921,26 @@ export namespace RunCreateParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. 
+ */ + last_messages?: number | null; + } + export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 8b0332fb8..c0c6bc8e4 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -346,26 +346,6 @@ export interface ThreadDeleted { object: 'thread.deleted'; } -/** - * Controls for how a thread will be truncated prior to the run. Use this to - * control the intial context window of the run. - */ -export interface TruncationObject { - /** - * The truncation strategy to use for the thread. The default is `auto`. If set to - * `last_messages`, the thread will be truncated to the n most recent messages in - * the thread. When set to `auto`, messages in the middle of the thread will be - * dropped to fit the context length of the model, `max_prompt_tokens`. - */ - type: 'auto' | 'last_messages'; - - /** - * The number of most recent messages from the thread when constructing the context - * for the run. - */ - last_messages?: number | null; -} - export interface ThreadCreateParams { /** * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to @@ -754,7 +734,7 @@ export interface ThreadCreateAndRunParamsBase { * Controls for how a thread will be truncated prior to the run. Use this to * control the intial context window of the run. */ - truncation_strategy?: TruncationObject | null; + truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null; } export namespace ThreadCreateAndRunParams { @@ -985,6 +965,26 @@ export namespace ThreadCreateAndRunParams { } } + /** + * Controls for how a thread will be truncated prior to the run. Use this to + * control the intial context window of the run. + */ + export interface TruncationStrategy { + /** + * The truncation strategy to use for the thread. 
The default is `auto`. If set to + * `last_messages`, the thread will be truncated to the n most recent messages in + * the thread. When set to `auto`, messages in the middle of the thread will be + * dropped to fit the context length of the model, `max_prompt_tokens`. + */ + type: 'auto' | 'last_messages'; + + /** + * The number of most recent messages from the thread when constructing the context + * for the run. + */ + last_messages?: number | null; + } + export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; } @@ -1684,7 +1684,6 @@ export declare namespace Threads { type AssistantToolChoiceOption as AssistantToolChoiceOption, type Thread as Thread, type ThreadDeleted as ThreadDeleted, - type TruncationObject as TruncationObject, type ThreadCreateParams as ThreadCreateParams, type ThreadUpdateParams as ThreadUpdateParams, type ThreadCreateAndRunParams as ThreadCreateAndRunParams, diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 5370c51b9..08c898ace 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -5,14 +5,12 @@ import { isRequestOptions } from '../../core'; import * as Core from '../../core'; import * as Shared from '../shared'; import * as GraderModelsAPI from '../graders/grader-models'; +import * as ResponsesAPI from '../responses/responses'; import * as RunsAPI from './runs/runs'; import { CreateEvalCompletionsRunDataSource, CreateEvalJSONLRunDataSource, - CreateEvalResponsesRunDataSource, EvalAPIError, - EvalJSONLFileContentSource, - EvalJSONLFileIDSource, RunCancelResponse, RunCreateParams, RunCreateResponse, @@ -106,36 +104,6 @@ export interface EvalCustomDataSourceConfig { type: 'custom'; } -/** - * A LogsDataSourceConfig which specifies the metadata property of your logs query. - * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. 
The - * schema returned by this data source config is used to defined what variables are - * available in your evals. `item` and `sample` are both defined when using this - * data source config. - */ -export interface EvalLogsDataSourceConfig { - /** - * The json schema for the run data source items. Learn how to build JSON schemas - * [here](https://json-schema.org/). - */ - schema: Record; - - /** - * The type of data source. Always `logs`. - */ - type: 'logs'; - - /** - * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format, and - * querying for objects via API or the dashboard. - * - * Keys are strings with a maximum length of 64 characters. Values are strings with - * a maximum length of 512 characters. - */ - metadata?: Shared.Metadata | null; -} - /** * @deprecated Deprecated in favor of LogsDataSourceConfig. */ @@ -186,7 +154,7 @@ export interface EvalCreateResponse { */ data_source_config: | EvalCustomDataSourceConfig - | EvalLogsDataSourceConfig + | EvalCreateResponse.Logs | EvalStoredCompletionsDataSourceConfig; /** @@ -222,6 +190,36 @@ export interface EvalCreateResponse { } export namespace EvalCreateResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. 
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + /** * A TextSimilarityGrader object which grades text based on similarity metrics. */ @@ -277,7 +275,7 @@ export interface EvalRetrieveResponse { */ data_source_config: | EvalCustomDataSourceConfig - | EvalLogsDataSourceConfig + | EvalRetrieveResponse.Logs | EvalStoredCompletionsDataSourceConfig; /** @@ -313,6 +311,36 @@ export interface EvalRetrieveResponse { } export namespace EvalRetrieveResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + /** * A TextSimilarityGrader object which grades text based on similarity metrics. 
*/ @@ -368,7 +396,7 @@ export interface EvalUpdateResponse { */ data_source_config: | EvalCustomDataSourceConfig - | EvalLogsDataSourceConfig + | EvalUpdateResponse.Logs | EvalStoredCompletionsDataSourceConfig; /** @@ -404,6 +432,36 @@ export interface EvalUpdateResponse { } export namespace EvalUpdateResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + /** * A TextSimilarityGrader object which grades text based on similarity metrics. */ @@ -459,7 +517,7 @@ export interface EvalListResponse { */ data_source_config: | EvalCustomDataSourceConfig - | EvalLogsDataSourceConfig + | EvalListResponse.Logs | EvalStoredCompletionsDataSourceConfig; /** @@ -495,6 +553,36 @@ export interface EvalListResponse { } export namespace EvalListResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. 
The + * schema returned by this data source config is used to defined what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + /** * A TextSimilarityGrader object which grades text based on similarity metrics. */ @@ -634,7 +722,7 @@ export namespace EvalCreateParams { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - input: Array; + input: Array; /** * The labels to classify to each item in the evaluation. @@ -674,6 +762,48 @@ export namespace EvalCreateParams { */ role: string; } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. 
+ */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } } /** @@ -745,7 +875,6 @@ Evals.RunListResponsesPage = RunListResponsesPage; export declare namespace Evals { export { type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, - type EvalLogsDataSourceConfig as EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse as EvalCreateResponse, type EvalRetrieveResponse as EvalRetrieveResponse, @@ -762,10 +891,7 @@ export declare namespace Evals { Runs as Runs, type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, - type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, - type EvalJSONLFileContentSource as EvalJSONLFileContentSource, - type EvalJSONLFileIDSource as EvalJSONLFileIDSource, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, type RunListResponse as RunListResponse, diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts index 084fc9ad6..b2627fbf3 100644 --- a/src/resources/evals/index.ts +++ b/src/resources/evals/index.ts @@ -4,7 +4,6 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, - type EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse, type EvalRetrieveResponse, @@ -20,10 +19,7 @@ export { Runs, type CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource, - type CreateEvalResponsesRunDataSource, type EvalAPIError, - type 
EvalJSONLFileContentSource, - type EvalJSONLFileIDSource, type RunCreateResponse, type RunRetrieveResponse, type RunListResponse, diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts index 8e13e67df..d0e18bff4 100644 --- a/src/resources/evals/runs/index.ts +++ b/src/resources/evals/runs/index.ts @@ -12,10 +12,7 @@ export { Runs, type CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource, - type CreateEvalResponsesRunDataSource, type EvalAPIError, - type EvalJSONLFileContentSource, - type EvalJSONLFileIDSource, type RunCreateResponse, type RunRetrieveResponse, type RunListResponse, diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index dec4dcb51..31883e6b5 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -88,8 +88,8 @@ export interface CreateEvalCompletionsRunDataSource { * A StoredCompletionsRunDataSource configuration describing a set of filters */ source: - | EvalJSONLFileContentSource - | EvalJSONLFileIDSource + | CreateEvalCompletionsRunDataSource.FileContent + | CreateEvalCompletionsRunDataSource.FileID | CreateEvalCompletionsRunDataSource.StoredCompletions; /** @@ -110,6 +110,38 @@ export interface CreateEvalCompletionsRunDataSource { } export namespace CreateEvalCompletionsRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. 
+ */ + type: 'file_id'; + } + /** * A StoredCompletionsRunDataSource configuration describing a set of filters */ @@ -155,7 +187,7 @@ export namespace CreateEvalCompletionsRunDataSource { * A list of chat messages forming the prompt or context. May include variable * references to the "item" namespace, ie {{item.name}}. */ - template: Array; + template: Array; /** * The type of input messages. Always `template`. @@ -163,6 +195,50 @@ export namespace CreateEvalCompletionsRunDataSource { type: 'template'; } + export namespace Template { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Message { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Message.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Message { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + export interface ItemReference { /** * A reference to a variable in the "item" namespace. 
Ie, "item.name" @@ -203,7 +279,7 @@ export namespace CreateEvalCompletionsRunDataSource { * eval */ export interface CreateEvalJSONLRunDataSource { - source: EvalJSONLFileContentSource | EvalJSONLFileIDSource; + source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; /** * The type of data source. Always `jsonl`. @@ -211,162 +287,37 @@ export interface CreateEvalJSONLRunDataSource { type: 'jsonl'; } -/** - * A ResponsesRunDataSource object describing a model sampling configuration. - */ -export interface CreateEvalResponsesRunDataSource { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - source: EvalJSONLFileContentSource | EvalJSONLFileIDSource | CreateEvalResponsesRunDataSource.Responses; - - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - input_messages?: CreateEvalResponsesRunDataSource.Template | CreateEvalResponsesRunDataSource.ItemReference; - - /** - * The name of the model to use for generating completions (e.g. "o3-mini"). - */ - model?: string; - - sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; -} - -export namespace CreateEvalResponsesRunDataSource { - /** - * A EvalResponsesSource object describing a run data source configuration. - */ - export interface Responses { - /** - * The type of run data source. Always `responses`. - */ - type: 'responses'; - - /** - * Only include items created after this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_after?: number | null; - - /** - * Only include items created before this timestamp (inclusive). This is a query - * parameter used to select responses. - */ - created_before?: number | null; - - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - - /** - * Optional string to search the 'instructions' field. 
This is a query parameter - * used to select responses. - */ - instructions_search?: string | null; - +export namespace CreateEvalJSONLRunDataSource { + export interface FileContent { /** - * Metadata filter for the responses. This is a query parameter used to select - * responses. + * The content of the jsonl file. */ - metadata?: unknown | null; + content: Array; /** - * The name of the model to find responses for. This is a query parameter used to - * select responses. + * The type of jsonl source. Always `file_content`. */ - model?: string | null; - - /** - * Optional reasoning effort parameter. This is a query parameter used to select - * responses. - */ - reasoning_effort?: Shared.ReasoningEffort | null; - - /** - * Sampling temperature. This is a query parameter used to select responses. - */ - temperature?: number | null; - - /** - * List of tool names. This is a query parameter used to select responses. - */ - tools?: Array | null; - - /** - * Nucleus sampling parameter. This is a query parameter used to select responses. - */ - top_p?: number | null; - - /** - * List of user identifiers. This is a query parameter used to select responses. - */ - users?: Array | null; + type: 'file_content'; } - export interface Template { - /** - * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. - */ - template: Array; + export namespace FileContent { + export interface Content { + item: Record; - /** - * The type of input messages. Always `template`. - */ - type: 'template'; - } - - export namespace Template { - export interface ChatMessage { - /** - * The content of the message. - */ - content: string; - - /** - * The role of the message (e.g. "system", "assistant", "user"). - */ - role: string; + sample?: Record; } } - export interface ItemReference { - /** - * A reference to a variable in the "item" namespace. 
Ie, "item.name" - */ - item_reference: string; - - /** - * The type of input messages. Always `item_reference`. - */ - type: 'item_reference'; - } - - export interface SamplingParams { - /** - * The maximum number of tokens in the generated output. - */ - max_completion_tokens?: number; - - /** - * A seed value to initialize the randomness, during sampling. - */ - seed?: number; - + export interface FileID { /** - * A higher temperature increases randomness in the outputs. + * The identifier of the file. */ - temperature?: number; + id: string; /** - * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + * The type of jsonl source. Always `file_id`. */ - top_p?: number; + type: 'file_id'; } } @@ -385,38 +336,6 @@ export interface EvalAPIError { message: string; } -export interface EvalJSONLFileContentSource { - /** - * The content of the jsonl file. - */ - content: Array; - - /** - * The type of jsonl source. Always `file_content`. - */ - type: 'file_content'; -} - -export namespace EvalJSONLFileContentSource { - export interface Content { - item: Record; - - sample?: Record; - } -} - -export interface EvalJSONLFileIDSource { - /** - * The identifier of the file. - */ - id: string; - - /** - * The type of jsonl source. Always `file_id`. - */ - type: 'file_id'; -} - /** * A schema representing an evaluation run. */ @@ -437,7 +356,7 @@ export interface RunCreateResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + | RunCreateResponse.Responses; /** * An object representing an error response from the Eval API. @@ -501,6 +420,239 @@ export interface RunCreateResponse { } export namespace RunCreateResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * A EvalResponsesSource object describing a run data source configuration. 
+ */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. 
+ */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. 
+ */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -596,7 +748,7 @@ export interface RunRetrieveResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + | RunRetrieveResponse.Responses; /** * An object representing an error response from the Eval API. @@ -660,6 +812,239 @@ export interface RunRetrieveResponse { } export namespace RunRetrieveResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). 
+ */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. 
+ */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. 
+ */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. @@ -752,10 +1137,7 @@ export interface RunListResponse { /** * Information about the run's data source. */ - data_source: - | CreateEvalJSONLRunDataSource - | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource | RunListResponse.Responses; /** * An object representing an error response from the Eval API. @@ -787,38 +1169,271 @@ export interface RunListResponse { */ name: string; - /** - * The type of the object. Always "eval.run". - */ - object: 'eval.run'; + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunListResponse.ResultCounts; + + /** + * The status of the evaluation run. 
+ */ + status: string; +} + +export namespace RunListResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. 
+ */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. 
+ */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; - /** - * Usage statistics for each model during the evaluation run. - */ - per_model_usage: Array; + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } - /** - * Results per testing criteria applied during the evaluation run. - */ - per_testing_criteria_results: Array; + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; - /** - * The URL to the rendered evaluation run report on the UI dashboard. - */ - report_url: string; + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; - /** - * Counters summarizing the outcomes of the evaluation run. - */ - result_counts: RunListResponse.ResultCounts; + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; - /** - * The status of the evaluation run. - */ - status: string; -} + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } -export namespace RunListResponse { export interface PerModelUsage { /** * The number of tokens retrieved from cache. 
@@ -922,7 +1537,7 @@ export interface RunCancelResponse { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + | RunCancelResponse.Responses; /** * An object representing an error response from the Eval API. @@ -986,6 +1601,239 @@ export interface RunCancelResponse { } export namespace RunCancelResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. 
+ */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. 
Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + export interface PerModelUsage { /** * The number of tokens retrieved from cache. 
@@ -1068,7 +1916,7 @@ export interface RunCreateParams { data_source: | CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource - | CreateEvalResponsesRunDataSource; + | RunCreateParams.CreateEvalResponsesRunDataSource; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -1086,6 +1934,246 @@ export interface RunCreateParams { name?: string; } +export namespace RunCreateParams { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface CreateEvalResponsesRunDataSource { + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + source: + | CreateEvalResponsesRunDataSource.FileContent + | CreateEvalResponsesRunDataSource.FileID + | CreateEvalResponsesRunDataSource.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + input_messages?: + | CreateEvalResponsesRunDataSource.Template + | CreateEvalResponsesRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; + } + + export namespace CreateEvalResponsesRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. 
+ */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Whether the response has tool calls. This is a query parameter used to select + * responses. + */ + has_tool_calls?: boolean | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the "item" namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. 
+ */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the "item" namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } +} + export interface RunListParams extends CursorPageParams { /** * Sort order for runs by timestamp. 
Use `asc` for ascending order or `desc` for @@ -1108,10 +2196,7 @@ export declare namespace Runs { export { type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, - type CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource, type EvalAPIError as EvalAPIError, - type EvalJSONLFileContentSource as EvalJSONLFileContentSource, - type EvalJSONLFileIDSource as EvalJSONLFileIDSource, type RunCreateResponse as RunCreateResponse, type RunRetrieveResponse as RunRetrieveResponse, type RunListResponse as RunListResponse, diff --git a/src/resources/graders/grader-models.ts b/src/resources/graders/grader-models.ts index 8a1a0eddd..d2c335300 100644 --- a/src/resources/graders/grader-models.ts +++ b/src/resources/graders/grader-models.ts @@ -1,7 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; -import * as Shared from '../shared'; +import * as ResponsesAPI from '../responses/responses'; export class GraderModels extends APIResource {} @@ -10,7 +10,7 @@ export class GraderModels extends APIResource {} * the evaluation. */ export interface LabelModelGrader { - input: Array; + input: Array; /** * The labels to assign to each item in the evaluation. @@ -38,6 +38,50 @@ export interface LabelModelGrader { type: 'label_model'; } +export namespace LabelModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. 
+ */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + /** * A MultiGrader object combines the output of multiple graders to produce a single * score. @@ -59,7 +103,7 @@ export interface MultiGrader { name: string; /** - * The type of grader. + * The object type, which is always `multi`. */ type: 'multi'; } @@ -96,7 +140,7 @@ export interface ScoreModelGrader { /** * The input text. This may include template strings. */ - input: Array; + input: Array; /** * The model to use for the evaluation. @@ -124,6 +168,50 @@ export interface ScoreModelGrader { sampling_params?: unknown; } +export namespace ScoreModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. 
+ */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + /** * A StringCheckGrader object that performs a string comparison between input and * reference using a specified operation. diff --git a/src/resources/index.ts b/src/resources/index.ts index 0f21e596c..9d827615c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -33,7 +33,6 @@ export { EvalListResponsesPage, Evals, type EvalCustomDataSourceConfig, - type EvalLogsDataSourceConfig, type EvalStoredCompletionsDataSourceConfig, type EvalCreateResponse, type EvalRetrieveResponse, @@ -91,7 +90,6 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, - type VectorStoreExpirationAfter, type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, diff --git a/src/resources/shared.ts b/src/resources/shared.ts index d0c2aaa49..adea184fd 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,7 +1,5 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import * as ResponsesAPI from './responses/responses'; - export type AllModels = | (string & {}) | ChatModel @@ -43,6 +41,7 @@ export type ChatModel = | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview-2025-03-11' | 'chatgpt-4o-latest' + | 'codex-mini-latest' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' @@ -120,48 +119,6 @@ export interface ErrorObject { type: string; } -/** - * A message input to the model with a role indicating instruction following - * hierarchy. Instructions given with the `developer` or `system` role take - * precedence over instructions given with the `user` role. 
Messages with the - * `assistant` role are presumed to have been generated by the model in previous - * interactions. - */ -export interface EvalItem { - /** - * Text inputs to the model - can contain template strings. - */ - content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; - - /** - * The role of the message input. One of `user`, `assistant`, `system`, or - * `developer`. - */ - role: 'user' | 'assistant' | 'system' | 'developer'; - - /** - * The type of the message input. Always `message`. - */ - type?: 'message'; -} - -export namespace EvalItem { - /** - * A text output from the model. - */ - export interface OutputText { - /** - * The text output from the model. - */ - text: string; - - /** - * The type of the output text. Always `output_text`. - */ - type: 'output_text'; - } -} - export interface FunctionDefinition { /** * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts index c3c042387..9cbcbc0b2 100644 --- a/src/resources/vector-stores/index.ts +++ b/src/resources/vector-stores/index.ts @@ -30,7 +30,6 @@ export { type StaticFileChunkingStrategyObjectParam, type VectorStore, type VectorStoreDeleted, - type VectorStoreExpirationAfter, type VectorStoreSearchResponse, type VectorStoreCreateParams, type VectorStoreUpdateParams, diff --git a/src/resources/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts index 66438be02..7d61e7fd6 100644 --- a/src/resources/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -249,7 +249,7 @@ export interface VectorStore { /** * The expiration policy for a vector store. */ - expires_after?: VectorStoreExpirationAfter; + expires_after?: VectorStore.ExpiresAfter; /** * The Unix timestamp (in seconds) for when the vector store will expire. 
@@ -284,6 +284,22 @@ export namespace VectorStore { */ total: number; } + + /** + * The expiration policy for a vector store. + */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } } export interface VectorStoreDeleted { @@ -294,22 +310,6 @@ export interface VectorStoreDeleted { object: 'vector_store.deleted'; } -/** - * The expiration policy for a vector store. - */ -export interface VectorStoreExpirationAfter { - /** - * Anchor timestamp after which the expiration policy applies. Supported anchors: - * `last_active_at`. - */ - anchor: 'last_active_at'; - - /** - * The number of days after the anchor time that the vector store will expire. - */ - days: number; -} - export interface VectorStoreSearchResponse { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -365,7 +365,7 @@ export interface VectorStoreCreateParams { /** * The expiration policy for a vector store. */ - expires_after?: VectorStoreExpirationAfter; + expires_after?: VectorStoreCreateParams.ExpiresAfter; /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that @@ -390,11 +390,29 @@ export interface VectorStoreCreateParams { name?: string; } +export namespace VectorStoreCreateParams { + /** + * The expiration policy for a vector store. + */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } +} + export interface VectorStoreUpdateParams { /** * The expiration policy for a vector store. 
*/ - expires_after?: VectorStoreExpirationAfter | null; + expires_after?: VectorStoreUpdateParams.ExpiresAfter | null; /** * Set of 16 key-value pairs that can be attached to an object. This can be useful @@ -412,6 +430,24 @@ export interface VectorStoreUpdateParams { name?: string | null; } +export namespace VectorStoreUpdateParams { + /** + * The expiration policy for a vector store. + */ + export interface ExpiresAfter { + /** + * Anchor timestamp after which the expiration policy applies. Supported anchors: + * `last_active_at`. + */ + anchor: 'last_active_at'; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } +} + export interface VectorStoreListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place @@ -485,7 +521,6 @@ export declare namespace VectorStores { type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, type VectorStore as VectorStore, type VectorStoreDeleted as VectorStoreDeleted, - type VectorStoreExpirationAfter as VectorStoreExpirationAfter, type VectorStoreSearchResponse as VectorStoreSearchResponse, VectorStoresPage as VectorStoresPage, VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, From 416c89bc773e6a79a238123467b4431da39c072d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 17:11:44 +0000 Subject: [PATCH 530/533] release: 4.99.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 10 ++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 14 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a279d9124..a65cc01f9 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.98.0" + ".": "4.99.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 
2f1a39177..2a552b8ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Changelog +## 4.99.0 (2025-05-16) + +Full Changelog: [v4.98.0...v4.99.0](https://github.com/openai/openai-node/compare/v4.98.0...v4.99.0) + +### Features + +* **api:** manual updates ([75eb804](https://github.com/openai/openai-node/commit/75eb804edd6ad653eaa22d47f8c6d09ee845ebf4)) +* **api:** responses x eval api ([5029f1a](https://github.com/openai/openai-node/commit/5029f1a05eb1e8601ada06e0a5ba49f4c2b83c02)) +* **api:** Updating Assistants and Evals API schemas ([27fd517](https://github.com/openai/openai-node/commit/27fd5173b20f75debe96024ae8f1ce58a8254d26)) + ## 4.98.0 (2025-05-08) Full Changelog: [v4.97.0...v4.98.0](https://github.com/openai/openai-node/compare/v4.97.0...v4.98.0) diff --git a/jsr.json b/jsr.json index 25bbc9ac2..2f29927c6 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.98.0", + "version": "4.99.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index d34efceb0..0d756ef85 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.98.0", + "version": "4.99.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index f64cc03ff..c7ee5a162 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.98.0'; // x-release-please-version +export const VERSION = '4.99.0'; // x-release-please-version From 5123fe08a56f3d0040b1cc67129382f3eacc3cca Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:35:22 +0000 Subject: [PATCH 531/533] chore(internal): version bump From 3f6f248191b45015924be76fd5154d149c4ed8a0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:41:32 +0000 Subject: [PATCH 532/533] feat(api): further updates for evals API --- .stats.yml | 4 +- src/resources/beta/realtime/realtime.ts | 2 +- .../beta/realtime/transcription-sessions.ts | 2 +- src/resources/evals/evals.ts | 31 ++--- src/resources/evals/runs/runs.ts | 109 ++++++++++-------- 5 files changed, 82 insertions(+), 66 deletions(-) diff --git a/.stats.yml b/.stats.yml index a3c5d081d..afa33d93b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5fa16b9a02985ae06e41be14946a9c325dc672fb014b3c19abca65880c6990e6.yml -openapi_spec_hash: da3e669f65130043b1170048c0727890 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-262e171d0a8150ea1192474d16ba3afdf9a054b399f1a49a9c9b697a3073c136.yml +openapi_spec_hash: 33e00a48df8f94c94f46290c489f132b config_hash: d8d5fda350f6db77c784f35429741a2e diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts index 5012b1edd..26fba883e 100644 --- a/src/resources/beta/realtime/realtime.ts +++ b/src/resources/beta/realtime/realtime.ts @@ -2571,7 +2571,7 @@ export interface TranscriptionSessionUpdatedEvent { * A new Realtime transcription session configuration. * * When a session is created on the server via REST API, the session object also - * contains an ephemeral key. Default TTL for keys is one minute. This property is + * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is * not present when a session is updated via the WebSocket API. 
*/ session: TranscriptionSessionsAPI.TranscriptionSession; diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts index 61e58a8e8..83e8c47ad 100644 --- a/src/resources/beta/realtime/transcription-sessions.ts +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -35,7 +35,7 @@ export class TranscriptionSessions extends APIResource { * A new Realtime transcription session configuration. * * When a session is created on the server via REST API, the session object also - * contains an ephemeral key. Default TTL for keys is one minute. This property is + * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is * not present when a session is updated via the WebSocket API. */ export interface TranscriptionSession { diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts index 08c898ace..05a656619 100644 --- a/src/resources/evals/evals.ts +++ b/src/resources/evals/evals.ts @@ -28,7 +28,8 @@ export class Evals extends APIResource { /** * Create the structure of an evaluation that can be used to test a model's - * performance. An evaluation is a set of testing criteria and a datasource. After + * performance. An evaluation is a set of testing criteria and the config for a + * data source, which dictates the schema of the data used in the evaluation. After * creating an evaluation, you can run it on different models and model parameters. * We support several types of graders and datasources. For more information, see * the [Evals guide](https://platform.openai.com/docs/guides/evals). @@ -115,9 +116,9 @@ export interface EvalStoredCompletionsDataSourceConfig { schema: Record; /** - * The type of data source. Always `stored-completions`. + * The type of data source. Always `stored_completions`. */ - type: 'stored-completions'; + type: 'stored_completions'; /** * Set of 16 key-value pairs that can be attached to an object. 
This can be useful @@ -136,7 +137,7 @@ export interface EvalStoredCompletionsDataSourceConfig { * * - Improve the quality of my chatbot * - See how well my chatbot handles customer support - * - Check if o3-mini is better at my usecase than gpt-4o + * - Check if o4-mini is better at my usecase than gpt-4o */ export interface EvalCreateResponse { /** @@ -257,7 +258,7 @@ export namespace EvalCreateResponse { * * - Improve the quality of my chatbot * - See how well my chatbot handles customer support - * - Check if o3-mini is better at my usecase than gpt-4o + * - Check if o4-mini is better at my usecase than gpt-4o */ export interface EvalRetrieveResponse { /** @@ -378,7 +379,7 @@ export namespace EvalRetrieveResponse { * * - Improve the quality of my chatbot * - See how well my chatbot handles customer support - * - Check if o3-mini is better at my usecase than gpt-4o + * - Check if o4-mini is better at my usecase than gpt-4o */ export interface EvalUpdateResponse { /** @@ -499,7 +500,7 @@ export namespace EvalUpdateResponse { * * - Improve the quality of my chatbot * - See how well my chatbot handles customer support - * - Check if o3-mini is better at my usecase than gpt-4o + * - Check if o4-mini is better at my usecase than gpt-4o */ export interface EvalListResponse { /** @@ -624,12 +625,16 @@ export interface EvalDeleteResponse { export interface EvalCreateParams { /** - * The configuration for the data source used for the evaluation runs. + * The configuration for the data source used for the evaluation runs. Dictates the + * schema of the data used in the evaluation. */ data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs | EvalCreateParams.StoredCompletions; /** - * A list of graders for all eval runs in this group. + * A list of graders for all eval runs in this group. Graders can reference + * variables in the data source using double curly braces notation, like + * `{{item.variable_name}}`. 
To reference the model's output, use the `sample` + * namespace (ie, `{{sample.output_text}}`). */ testing_criteria: Array< | EvalCreateParams.LabelModel @@ -699,13 +704,13 @@ export namespace EvalCreateParams { } /** - * Deprecated in favor of LogsDataSourceConfig. + * @deprecated Deprecated in favor of LogsDataSourceConfig. */ export interface StoredCompletions { /** - * The type of data source. Always `stored-completions`. + * The type of data source. Always `stored_completions`. */ - type: 'stored-completions'; + type: 'stored_completions'; /** * Metadata filters for the stored completions data source. @@ -720,7 +725,7 @@ export namespace EvalCreateParams { export interface LabelModel { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ input: Array; diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts index 31883e6b5..e761e2160 100644 --- a/src/resources/evals/runs/runs.ts +++ b/src/resources/evals/runs/runs.ts @@ -19,7 +19,9 @@ export class Runs extends APIResource { outputItems: OutputItemsAPI.OutputItems = new OutputItemsAPI.OutputItems(this._client); /** - * Create a new evaluation run. This is the endpoint that will kick off grading. + * Kicks off a new run for a given evaluation, specifying the data source, and what + * model configuration to use to test. The datasource will be validated against the + * schema specified in the config of the evaluation. */ create( evalId: string, @@ -85,7 +87,7 @@ export class RunListResponsesPage extends CursorPage {} */ export interface CreateEvalCompletionsRunDataSource { /** - * A StoredCompletionsRunDataSource configuration describing a set of filters + * Determines what populates the `item` namespace in this run's data source. 
*/ source: | CreateEvalCompletionsRunDataSource.FileContent @@ -97,6 +99,12 @@ export interface CreateEvalCompletionsRunDataSource { */ type: 'completions'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: | CreateEvalCompletionsRunDataSource.Template | CreateEvalCompletionsRunDataSource.ItemReference; @@ -185,7 +193,7 @@ export namespace CreateEvalCompletionsRunDataSource { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -241,7 +249,7 @@ export namespace CreateEvalCompletionsRunDataSource { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" */ item_reference: string; @@ -279,6 +287,9 @@ export namespace CreateEvalCompletionsRunDataSource { * eval */ export interface CreateEvalJSONLRunDataSource { + /** + * Determines what populates the `item` namespace in the data source. + */ source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID; /** @@ -425,7 +436,7 @@ export namespace RunCreateResponse { */ export interface Responses { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: Responses.FileContent | Responses.FileID | Responses.Responses; @@ -434,6 +445,12 @@ export namespace RunCreateResponse { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. 
Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: Responses.Template | Responses.ItemReference; /** @@ -498,12 +515,6 @@ export namespace RunCreateResponse { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -552,7 +563,7 @@ export namespace RunCreateResponse { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -620,7 +631,7 @@ export namespace RunCreateResponse { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; @@ -817,7 +828,7 @@ export namespace RunRetrieveResponse { */ export interface Responses { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: Responses.FileContent | Responses.FileID | Responses.Responses; @@ -826,6 +837,12 @@ export namespace RunRetrieveResponse { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. 
+ */ input_messages?: Responses.Template | Responses.ItemReference; /** @@ -890,12 +907,6 @@ export namespace RunRetrieveResponse { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -944,7 +955,7 @@ export namespace RunRetrieveResponse { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -1012,7 +1023,7 @@ export namespace RunRetrieveResponse { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; @@ -1206,7 +1217,7 @@ export namespace RunListResponse { */ export interface Responses { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: Responses.FileContent | Responses.FileID | Responses.Responses; @@ -1215,6 +1226,12 @@ export namespace RunListResponse { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: Responses.Template | Responses.ItemReference; /** @@ -1279,12 +1296,6 @@ export namespace RunListResponse { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. 
- */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -1333,7 +1344,7 @@ export namespace RunListResponse { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -1401,7 +1412,7 @@ export namespace RunListResponse { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; @@ -1606,7 +1617,7 @@ export namespace RunCancelResponse { */ export interface Responses { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: Responses.FileContent | Responses.FileID | Responses.Responses; @@ -1615,6 +1626,12 @@ export namespace RunCancelResponse { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: Responses.Template | Responses.ItemReference; /** @@ -1679,12 +1696,6 @@ export namespace RunCancelResponse { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -1733,7 +1744,7 @@ export namespace RunCancelResponse { export interface Template { /** * A list of chat messages forming the prompt or context. 
May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. */ template: Array; @@ -1801,7 +1812,7 @@ export namespace RunCancelResponse { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; @@ -1940,7 +1951,7 @@ export namespace RunCreateParams { */ export interface CreateEvalResponsesRunDataSource { /** - * A EvalResponsesSource object describing a run data source configuration. + * Determines what populates the `item` namespace in this run's data source. */ source: | CreateEvalResponsesRunDataSource.FileContent @@ -1952,6 +1963,12 @@ export namespace RunCreateParams { */ type: 'responses'; + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ input_messages?: | CreateEvalResponsesRunDataSource.Template | CreateEvalResponsesRunDataSource.ItemReference; @@ -2018,12 +2035,6 @@ export namespace RunCreateParams { */ created_before?: number | null; - /** - * Whether the response has tool calls. This is a query parameter used to select - * responses. - */ - has_tool_calls?: boolean | null; - /** * Optional string to search the 'instructions' field. This is a query parameter * used to select responses. @@ -2072,7 +2083,7 @@ export namespace RunCreateParams { export interface Template { /** * A list of chat messages forming the prompt or context. May include variable - * references to the "item" namespace, ie {{item.name}}. + * references to the `item` namespace, ie {{item.name}}. 
*/ template: Array; @@ -2140,7 +2151,7 @@ export namespace RunCreateParams { export interface ItemReference { /** - * A reference to a variable in the "item" namespace. Ie, "item.name" + * A reference to a variable in the `item` namespace. Ie, "item.name" */ item_reference: string; From 29e608f24af8880fcdc0d45cc64321e4856e47ba Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:42:24 +0000 Subject: [PATCH 533/533] release: 4.100.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ jsr.json | 2 +- package.json | 2 +- src/version.ts | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a65cc01f9..989bed91e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.99.0" + ".": "4.100.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a552b8ed..adda41e52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 4.100.0 (2025-05-16) + +Full Changelog: [v4.99.0...v4.100.0](https://github.com/openai/openai-node/compare/v4.99.0...v4.100.0) + +### Features + +* **api:** further updates for evals API ([3f6f248](https://github.com/openai/openai-node/commit/3f6f248191b45015924be76fd5154d149c4ed8a0)) + + +### Chores + +* **internal:** version bump ([5123fe0](https://github.com/openai/openai-node/commit/5123fe08a56f3d0040b1cc67129382f3eacc3cca)) + ## 4.99.0 (2025-05-16) Full Changelog: [v4.98.0...v4.99.0](https://github.com/openai/openai-node/compare/v4.98.0...v4.99.0) diff --git a/jsr.json b/jsr.json index 2f29927c6..3c2d41b0f 100644 --- a/jsr.json +++ b/jsr.json @@ -1,6 +1,6 @@ { "name": "@openai/openai", - "version": "4.99.0", + "version": "4.100.0", "exports": { ".": "./index.ts", "./helpers/zod": "./helpers/zod.ts", diff --git a/package.json b/package.json index 0d756ef85..23205e569 100644 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.99.0", + "version": "4.100.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", diff --git a/src/version.ts b/src/version.ts index c7ee5a162..62b43ffce 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.99.0'; // x-release-please-version +export const VERSION = '4.100.0'; // x-release-please-version