From 3159e6bdcc815501147db1203f6472c38fbda177 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:21:21 +0000
Subject: [PATCH 001/533] chore: update dependency @types/ws to v8.5.10 (#683)
---
ecosystem-tests/node-ts-cjs-auto/package-lock.json | 6 +++---
ecosystem-tests/node-ts-cjs/package-lock.json | 6 +++---
ecosystem-tests/node-ts4.5-jest27/package-lock.json | 6 +++---
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/ecosystem-tests/node-ts-cjs-auto/package-lock.json b/ecosystem-tests/node-ts-cjs-auto/package-lock.json
index 56cf77290..df381f1b5 100644
--- a/ecosystem-tests/node-ts-cjs-auto/package-lock.json
+++ b/ecosystem-tests/node-ts-cjs-auto/package-lock.json
@@ -1121,9 +1121,9 @@
"dev": true
},
"node_modules/@types/ws": {
- "version": "8.5.5",
- "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz",
- "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==",
+ "version": "8.5.10",
+ "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz",
+ "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==",
"dev": true,
"dependencies": {
"@types/node": "*"
diff --git a/ecosystem-tests/node-ts-cjs/package-lock.json b/ecosystem-tests/node-ts-cjs/package-lock.json
index f770cacac..c39fc8f1c 100644
--- a/ecosystem-tests/node-ts-cjs/package-lock.json
+++ b/ecosystem-tests/node-ts-cjs/package-lock.json
@@ -1163,9 +1163,9 @@
"dev": true
},
"node_modules/@types/ws": {
- "version": "8.5.5",
- "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz",
- "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==",
+ "version": "8.5.10",
+ "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz",
+ "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==",
"dev": true,
"dependencies": {
"@types/node": "*"
diff --git a/ecosystem-tests/node-ts4.5-jest27/package-lock.json b/ecosystem-tests/node-ts4.5-jest27/package-lock.json
index 682b0f7a6..f46e12de9 100644
--- a/ecosystem-tests/node-ts4.5-jest27/package-lock.json
+++ b/ecosystem-tests/node-ts4.5-jest27/package-lock.json
@@ -1096,9 +1096,9 @@
"dev": true
},
"node_modules/@types/ws": {
- "version": "8.5.5",
- "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz",
- "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==",
+ "version": "8.5.10",
+ "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz",
+ "integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==",
"dev": true,
"dependencies": {
"@types/node": "*"
From 684f139c0d913e64db171fe5877c7c5980e29813 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 22 Feb 2024 12:59:26 -0500
Subject: [PATCH 002/533] chore(ci): update actions/setup-node action to v4
(#685)
---
.github/workflows/ci.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b342025cc..0f699bc95 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,7 +17,7 @@ jobs:
- uses: actions/checkout@v3
- name: Set up Node
- uses: actions/setup-node@v3
+ uses: actions/setup-node@v4
with:
node-version: '18'
From 90a733e6ff714e6fef3c71ae36e18eee6787a666 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 22 Feb 2024 15:16:53 -0500
Subject: [PATCH 003/533] chore(types): extract run status to a named type
(#686)
---
.github/workflows/ci.yml | 2 +-
api.md | 1 +
src/resources/beta/threads/index.ts | 1 +
src/resources/beta/threads/runs/index.ts | 1 +
src/resources/beta/threads/runs/runs.ts | 26 ++++++++++++++++--------
src/resources/beta/threads/threads.ts | 1 +
6 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0f699bc95..f51c7a308 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -14,7 +14,7 @@ jobs:
if: github.repository == 'openai/openai-node'
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up Node
uses: actions/setup-node@v4
diff --git a/api.md b/api.md
index 68d8545cc..ff3180cba 100644
--- a/api.md
+++ b/api.md
@@ -221,6 +221,7 @@ Types:
- RequiredActionFunctionToolCall
- Run
+- RunStatus
Methods:
diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts
index 53e26a5c6..54a02dd03 100644
--- a/src/resources/beta/threads/index.ts
+++ b/src/resources/beta/threads/index.ts
@@ -14,6 +14,7 @@ export {
export {
RequiredActionFunctionToolCall,
Run,
+ RunStatus,
RunCreateParams,
RunUpdateParams,
RunListParams,
diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts
index a2261f961..b11736c5c 100644
--- a/src/resources/beta/threads/runs/index.ts
+++ b/src/resources/beta/threads/runs/index.ts
@@ -14,6 +14,7 @@ export {
export {
RequiredActionFunctionToolCall,
Run,
+ RunStatus,
RunCreateParams,
RunUpdateParams,
RunListParams,
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 749d2c7f6..9582a060b 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -242,15 +242,7 @@ export interface Run {
* `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or
* `expired`.
*/
- status:
- | 'queued'
- | 'in_progress'
- | 'requires_action'
- | 'cancelling'
- | 'cancelled'
- | 'failed'
- | 'completed'
- | 'expired';
+ status: RunStatus;
/**
* The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
@@ -361,6 +353,21 @@ export namespace Run {
}
}
+/**
+ * The status of the run, which can be either `queued`, `in_progress`,
+ * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or
+ * `expired`.
+ */
+export type RunStatus =
+ | 'queued'
+ | 'in_progress'
+ | 'requires_action'
+ | 'cancelling'
+ | 'cancelled'
+ | 'failed'
+ | 'completed'
+ | 'expired';
+
export interface RunCreateParams {
/**
* The ID of the
@@ -486,6 +493,7 @@ export namespace RunSubmitToolOutputsParams {
export namespace Runs {
export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
export import Run = RunsAPI.Run;
+ export import RunStatus = RunsAPI.RunStatus;
export import RunsPage = RunsAPI.RunsPage;
export import RunCreateParams = RunsAPI.RunCreateParams;
export import RunUpdateParams = RunsAPI.RunUpdateParams;
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index 8bbe1804f..5aa1f8c25 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -298,6 +298,7 @@ export namespace Threads {
export import Runs = RunsAPI.Runs;
export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
export import Run = RunsAPI.Run;
+ export import RunStatus = RunsAPI.RunStatus;
export import RunsPage = RunsAPI.RunsPage;
export import RunCreateParams = RunsAPI.RunCreateParams;
export import RunUpdateParams = RunsAPI.RunUpdateParams;
From 0ae349cf18f307a3e901149de8411caaa370fb95 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 23 Feb 2024 06:35:12 -0500
Subject: [PATCH 004/533] chore: update @types/react to 18.2.58,
@types/react-dom to 18.2.19 (#688)
---
ecosystem-tests/vercel-edge/package-lock.json | 16 ++++++++--------
ecosystem-tests/vercel-edge/package.json | 4 ++--
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json
index 6b44e0774..d1c67b718 100644
--- a/ecosystem-tests/vercel-edge/package-lock.json
+++ b/ecosystem-tests/vercel-edge/package-lock.json
@@ -15,8 +15,8 @@
},
"devDependencies": {
"@types/node": "20.3.3",
- "@types/react": "18.2.13",
- "@types/react-dom": "18.2.6",
+ "@types/react": "18.2.58",
+ "@types/react-dom": "18.2.19",
"edge-runtime": "^2.4.3",
"fastest-levenshtein": "^1.0.16",
"jest": "^29.5.0",
@@ -1562,9 +1562,9 @@
"dev": true
},
"node_modules/@types/react": {
- "version": "18.2.13",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.13.tgz",
- "integrity": "sha512-vJ+zElvi/Zn9cVXB5slX2xL8PZodPCwPRDpittQdw43JR2AJ5k3vKdgJJyneV/cYgIbLQUwXa9JVDvUZXGba+Q==",
+ "version": "18.2.58",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.58.tgz",
+ "integrity": "sha512-TaGvMNhxvG2Q0K0aYxiKfNDS5m5ZsoIBBbtfUorxdH4NGSXIlYvZxLJI+9Dd3KjeB3780bciLyAb7ylO8pLhPw==",
"dev": true,
"dependencies": {
"@types/prop-types": "*",
@@ -1573,9 +1573,9 @@
}
},
"node_modules/@types/react-dom": {
- "version": "18.2.6",
- "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.6.tgz",
- "integrity": "sha512-2et4PDvg6PVCyS7fuTc4gPoksV58bW0RwSxWKcPRcHZf0PRUGq03TKcD/rUHe3azfV6/5/biUBJw+HhCQjaP0A==",
+ "version": "18.2.19",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz",
+ "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==",
"dev": true,
"dependencies": {
"@types/react": "*"
diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json
index 9ebff4bbc..506a9d08c 100644
--- a/ecosystem-tests/vercel-edge/package.json
+++ b/ecosystem-tests/vercel-edge/package.json
@@ -21,8 +21,8 @@
},
"devDependencies": {
"@types/node": "20.3.3",
- "@types/react": "18.2.13",
- "@types/react-dom": "18.2.6",
+ "@types/react": "18.2.58",
+ "@types/react-dom": "18.2.19",
"edge-runtime": "^2.4.3",
"fastest-levenshtein": "^1.0.16",
"jest": "^29.5.0",
From 5601376ef6cee6c456d94983dbc6745e2cf2ce08 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 23 Feb 2024 09:51:32 -0500
Subject: [PATCH 005/533] chore: update dependency next to v13.5.6 (#689)
---
ecosystem-tests/vercel-edge/package-lock.json | 127 ++++++++----------
ecosystem-tests/vercel-edge/package.json | 2 +-
2 files changed, 60 insertions(+), 69 deletions(-)
diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json
index d1c67b718..ebac7eb81 100644
--- a/ecosystem-tests/vercel-edge/package-lock.json
+++ b/ecosystem-tests/vercel-edge/package-lock.json
@@ -9,7 +9,7 @@
"version": "0.1.0",
"dependencies": {
"ai": "2.1.34",
- "next": "13.4.6",
+ "next": "13.5.6",
"react": "18.2.0",
"react-dom": "18.2.0"
},
@@ -1171,14 +1171,14 @@
}
},
"node_modules/@next/env": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/env/-/env-13.4.6.tgz",
- "integrity": "sha512-nqUxEtvDqFhmV1/awSg0K2XHNwkftNaiUqCYO9e6+MYmqNObpKVl7OgMkGaQ2SZnFx5YqF0t60ZJTlyJIDAijg=="
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/env/-/env-13.5.6.tgz",
+ "integrity": "sha512-Yac/bV5sBGkkEXmAX5FWPS9Mmo2rthrOPRQQNfycJPkjUAUclomCPH7QFVCDQ4Mp2k2K1SSM6m0zrxYrOwtFQw=="
},
"node_modules/@next/swc-darwin-arm64": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.4.6.tgz",
- "integrity": "sha512-ahi6VP98o4HV19rkOXPSUu+ovfHfUxbJQ7VVJ7gL2FnZRr7onEFC1oGQ6NQHpm8CxpIzSSBW79kumlFMOmZVjg==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.5.6.tgz",
+ "integrity": "sha512-5nvXMzKtZfvcu4BhtV0KH1oGv4XEW+B+jOfmBdpFI3C7FrB/MfujRpWYSBBO64+qbW8pkZiSyQv9eiwnn5VIQA==",
"cpu": [
"arm64"
],
@@ -1191,9 +1191,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.6.tgz",
- "integrity": "sha512-13cXxKFsPJIJKzUqrU5XB1mc0xbUgYsRcdH6/rB8c4NMEbWGdtD4QoK9ShN31TZdePpD4k416Ur7p+deMIxnnA==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.5.6.tgz",
+ "integrity": "sha512-6cgBfxg98oOCSr4BckWjLLgiVwlL3vlLj8hXg2b+nDgm4bC/qVXXLfpLB9FHdoDu4057hzywbxKvmYGmi7yUzA==",
"cpu": [
"x64"
],
@@ -1206,9 +1206,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.6.tgz",
- "integrity": "sha512-Ti+NMHEjTNktCVxNjeWbYgmZvA2AqMMI2AMlzkXsU7W4pXCMhrryAmAIoo+7YdJbsx01JQWYVxGe62G6DoCLaA==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.5.6.tgz",
+ "integrity": "sha512-txagBbj1e1w47YQjcKgSU4rRVQ7uF29YpnlHV5xuVUsgCUf2FmyfJ3CPjZUvpIeXCJAoMCFAoGnbtX86BK7+sg==",
"cpu": [
"arm64"
],
@@ -1221,9 +1221,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.6.tgz",
- "integrity": "sha512-OHoC6gO7XfjstgwR+z6UHKlvhqJfyMtNaJidjx3sEcfaDwS7R2lqR5AABi8PuilGgi0BO0O0sCXqLlpp3a0emQ==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.5.6.tgz",
+ "integrity": "sha512-cGd+H8amifT86ZldVJtAKDxUqeFyLWW+v2NlBULnLAdWsiuuN8TuhVBt8ZNpCqcAuoruoSWynvMWixTFcroq+Q==",
"cpu": [
"arm64"
],
@@ -1236,9 +1236,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.6.tgz",
- "integrity": "sha512-zHZxPGkUlpfNJCboUrFqwlwEX5vI9LSN70b8XEb0DYzzlrZyCyOi7hwDp/+3Urm9AB7YCAJkgR5Sp1XBVjHdfQ==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.5.6.tgz",
+ "integrity": "sha512-Mc2b4xiIWKXIhBy2NBTwOxGD3nHLmq4keFk+d4/WL5fMsB8XdJRdtUlL87SqVCTSaf1BRuQQf1HvXZcy+rq3Nw==",
"cpu": [
"x64"
],
@@ -1251,9 +1251,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.6.tgz",
- "integrity": "sha512-K/Y8lYGTwTpv5ME8PSJxwxLolaDRdVy+lOd9yMRMiQE0BLUhtxtCWC9ypV42uh9WpLjoaD0joOsB9Q6mbrSGJg==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.5.6.tgz",
+ "integrity": "sha512-CFHvP9Qz98NruJiUnCe61O6GveKKHpJLloXbDSWRhqhkJdZD2zU5hG+gtVJR//tyW897izuHpM6Gtf6+sNgJPQ==",
"cpu": [
"x64"
],
@@ -1266,9 +1266,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.6.tgz",
- "integrity": "sha512-U6LtxEUrjBL2tpW+Kr1nHCSJWNeIed7U7l5o7FiKGGwGgIlFi4UHDiLI6TQ2lxi20fAU33CsruV3U0GuzMlXIw==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.5.6.tgz",
+ "integrity": "sha512-aFv1ejfkbS7PUa1qVPwzDHjQWQtknzAZWGTKYIAaS4NMtBlk3VyA6AYn593pqNanlicewqyl2jUhQAaFV/qXsg==",
"cpu": [
"arm64"
],
@@ -1281,9 +1281,9 @@
}
},
"node_modules/@next/swc-win32-ia32-msvc": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.6.tgz",
- "integrity": "sha512-eEBeAqpCfhdPSlCZCayjCiyIllVqy4tcqvm1xmg3BgJG0G5ITiMM4Cw2WVeRSgWDJqQGRyyb+q8Y2ltzhXOWsQ==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.5.6.tgz",
+ "integrity": "sha512-XqqpHgEIlBHvzwG8sp/JXMFkLAfGLqkbVsyN+/Ih1mR8INb6YCc2x/Mbwi6hsAgUnqQztz8cvEbHJUbSl7RHDg==",
"cpu": [
"ia32"
],
@@ -1296,9 +1296,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.6.tgz",
- "integrity": "sha512-OrZs94AuO3ZS5tnqlyPRNgfWvboXaDQCi5aXGve3o3C+Sj0ctMUV9+Do+0zMvvLRumR8E0PTWKvtz9n5vzIsWw==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.5.6.tgz",
+ "integrity": "sha512-Cqfe1YmOS7k+5mGu92nl5ULkzpKuxJrP3+4AEuPmrpFZ3BHxTY3TnHmU1On3bFmFFs6FbTcdF58CCUProGpIGQ==",
"cpu": [
"x64"
],
@@ -1410,9 +1410,9 @@
}
},
"node_modules/@swc/helpers": {
- "version": "0.5.1",
- "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.1.tgz",
- "integrity": "sha512-sJ902EfIzn1Fa+qYmjdQqh8tPsoxyBz+8yBKC2HKUxyezKJFwPGOn7pv4WY6QuQW//ySQi5lJjA/ZT9sNWWNTg==",
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz",
+ "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==",
"dependencies": {
"tslib": "^2.4.0"
}
@@ -5064,39 +5064,37 @@
"dev": true
},
"node_modules/next": {
- "version": "13.4.6",
- "resolved": "https://registry.npmjs.org/next/-/next-13.4.6.tgz",
- "integrity": "sha512-sjVqjxU+U2aXZnYt4Ud6CTLNNwWjdSfMgemGpIQJcN3Z7Jni9xRWbR0ie5fQzCg87aLqQVhKA2ud2gPoqJ9lGw==",
+ "version": "13.5.6",
+ "resolved": "https://registry.npmjs.org/next/-/next-13.5.6.tgz",
+ "integrity": "sha512-Y2wTcTbO4WwEsVb4A8VSnOsG1I9ok+h74q0ZdxkwM3EODqrs4pasq7O0iUxbcS9VtWMicG7f3+HAj0r1+NtKSw==",
"dependencies": {
- "@next/env": "13.4.6",
- "@swc/helpers": "0.5.1",
+ "@next/env": "13.5.6",
+ "@swc/helpers": "0.5.2",
"busboy": "1.6.0",
"caniuse-lite": "^1.0.30001406",
- "postcss": "8.4.14",
+ "postcss": "8.4.31",
"styled-jsx": "5.1.1",
- "watchpack": "2.4.0",
- "zod": "3.21.4"
+ "watchpack": "2.4.0"
},
"bin": {
"next": "dist/bin/next"
},
"engines": {
- "node": ">=16.8.0"
+ "node": ">=16.14.0"
},
"optionalDependencies": {
- "@next/swc-darwin-arm64": "13.4.6",
- "@next/swc-darwin-x64": "13.4.6",
- "@next/swc-linux-arm64-gnu": "13.4.6",
- "@next/swc-linux-arm64-musl": "13.4.6",
- "@next/swc-linux-x64-gnu": "13.4.6",
- "@next/swc-linux-x64-musl": "13.4.6",
- "@next/swc-win32-arm64-msvc": "13.4.6",
- "@next/swc-win32-ia32-msvc": "13.4.6",
- "@next/swc-win32-x64-msvc": "13.4.6"
+ "@next/swc-darwin-arm64": "13.5.6",
+ "@next/swc-darwin-x64": "13.5.6",
+ "@next/swc-linux-arm64-gnu": "13.5.6",
+ "@next/swc-linux-arm64-musl": "13.5.6",
+ "@next/swc-linux-x64-gnu": "13.5.6",
+ "@next/swc-linux-x64-musl": "13.5.6",
+ "@next/swc-win32-arm64-msvc": "13.5.6",
+ "@next/swc-win32-ia32-msvc": "13.5.6",
+ "@next/swc-win32-x64-msvc": "13.5.6"
},
"peerDependencies": {
"@opentelemetry/api": "^1.1.0",
- "fibers": ">= 3.1.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"sass": "^1.3.0"
@@ -5105,9 +5103,6 @@
"@opentelemetry/api": {
"optional": true
},
- "fibers": {
- "optional": true
- },
"sass": {
"optional": true
}
@@ -5419,9 +5414,9 @@
}
},
"node_modules/postcss": {
- "version": "8.4.14",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.14.tgz",
- "integrity": "sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==",
+ "version": "8.4.31",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
+ "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==",
"funding": [
{
"type": "opencollective",
@@ -5430,10 +5425,14 @@
{
"type": "tidelift",
"url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
}
],
"dependencies": {
- "nanoid": "^3.3.4",
+ "nanoid": "^3.3.6",
"picocolors": "^1.0.0",
"source-map-js": "^1.0.2"
},
@@ -6715,14 +6714,6 @@
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
- },
- "node_modules/zod": {
- "version": "3.21.4",
- "resolved": "https://registry.npmjs.org/zod/-/zod-3.21.4.tgz",
- "integrity": "sha512-m46AKbrzKVzOzs/DZgVnG5H55N1sv1M8qZU3A8RIKbs3mrACDNeIOeilDymVb2HdmP8uwshOCF4uJ8uM9rCqJw==",
- "funding": {
- "url": "https://github.com/sponsors/colinhacks"
- }
}
}
}
diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json
index 506a9d08c..171ba9c1a 100644
--- a/ecosystem-tests/vercel-edge/package.json
+++ b/ecosystem-tests/vercel-edge/package.json
@@ -15,7 +15,7 @@
},
"dependencies": {
"ai": "2.1.34",
- "next": "13.4.6",
+ "next": "13.5.6",
"react": "18.2.0",
"react-dom": "18.2.0"
},
From e84773e3813a2f71cd542c3b04eb9842d99eb0ca Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 23 Feb 2024 10:31:41 -0500
Subject: [PATCH 006/533] chore: update dependency @types/node to v20.11.20
(#690)
---
.../node-ts-cjs-auto/package-lock.json | 17 +++++++++++++----
ecosystem-tests/node-ts-cjs/package-lock.json | 17 +++++++++++++----
.../node-ts-esm-auto/package-lock.json | 17 +++++++++++++----
.../node-ts-esm-web/package-lock.json | 17 +++++++++++++----
ecosystem-tests/node-ts-esm/package-lock.json | 17 +++++++++++++----
.../node-ts4.5-jest27/package-lock.json | 17 +++++++++++++----
6 files changed, 78 insertions(+), 24 deletions(-)
diff --git a/ecosystem-tests/node-ts-cjs-auto/package-lock.json b/ecosystem-tests/node-ts-cjs-auto/package-lock.json
index df381f1b5..a11f9814d 100644
--- a/ecosystem-tests/node-ts-cjs-auto/package-lock.json
+++ b/ecosystem-tests/node-ts-cjs-auto/package-lock.json
@@ -1093,10 +1093,13 @@
}
},
"node_modules/@types/node": {
- "version": "20.5.7",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz",
- "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==",
- "dev": true
+ "version": "20.11.20",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
+ "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
},
"node_modules/@types/node-fetch": {
"version": "2.6.4",
@@ -3684,6 +3687,12 @@
"node": ">=4.2.0"
}
},
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "dev": true
+ },
"node_modules/update-browserslist-db": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz",
diff --git a/ecosystem-tests/node-ts-cjs/package-lock.json b/ecosystem-tests/node-ts-cjs/package-lock.json
index c39fc8f1c..c5280c5b5 100644
--- a/ecosystem-tests/node-ts-cjs/package-lock.json
+++ b/ecosystem-tests/node-ts-cjs/package-lock.json
@@ -1135,10 +1135,13 @@
}
},
"node_modules/@types/node": {
- "version": "20.5.7",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz",
- "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==",
- "dev": true
+ "version": "20.11.20",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
+ "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
},
"node_modules/@types/node-fetch": {
"version": "2.6.4",
@@ -4244,6 +4247,12 @@
"node": ">=4.2.0"
}
},
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "dev": true
+ },
"node_modules/universalify": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz",
diff --git a/ecosystem-tests/node-ts-esm-auto/package-lock.json b/ecosystem-tests/node-ts-esm-auto/package-lock.json
index 1123560d4..4bce04f80 100644
--- a/ecosystem-tests/node-ts-esm-auto/package-lock.json
+++ b/ecosystem-tests/node-ts-esm-auto/package-lock.json
@@ -1157,10 +1157,13 @@
}
},
"node_modules/@types/node": {
- "version": "20.5.7",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz",
- "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==",
- "dev": true
+ "version": "20.11.20",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
+ "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
},
"node_modules/@types/stack-utils": {
"version": "2.0.3",
@@ -3812,6 +3815,12 @@
"node": ">=4.2.0"
}
},
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "dev": true
+ },
"node_modules/update-browserslist-db": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz",
diff --git a/ecosystem-tests/node-ts-esm-web/package-lock.json b/ecosystem-tests/node-ts-esm-web/package-lock.json
index a2b14d348..b96128a4e 100644
--- a/ecosystem-tests/node-ts-esm-web/package-lock.json
+++ b/ecosystem-tests/node-ts-esm-web/package-lock.json
@@ -1157,10 +1157,13 @@
}
},
"node_modules/@types/node": {
- "version": "20.5.7",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz",
- "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==",
- "dev": true
+ "version": "20.11.20",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
+ "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
},
"node_modules/@types/stack-utils": {
"version": "2.0.3",
@@ -3812,6 +3815,12 @@
"node": ">=4.2.0"
}
},
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "dev": true
+ },
"node_modules/update-browserslist-db": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz",
diff --git a/ecosystem-tests/node-ts-esm/package-lock.json b/ecosystem-tests/node-ts-esm/package-lock.json
index 480a700fe..4aecff6ca 100644
--- a/ecosystem-tests/node-ts-esm/package-lock.json
+++ b/ecosystem-tests/node-ts-esm/package-lock.json
@@ -1157,10 +1157,13 @@
}
},
"node_modules/@types/node": {
- "version": "20.5.7",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.5.7.tgz",
- "integrity": "sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==",
- "dev": true
+ "version": "20.11.20",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
+ "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
},
"node_modules/@types/stack-utils": {
"version": "2.0.3",
@@ -3812,6 +3815,12 @@
"node": ">=4.2.0"
}
},
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "dev": true
+ },
"node_modules/update-browserslist-db": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz",
diff --git a/ecosystem-tests/node-ts4.5-jest27/package-lock.json b/ecosystem-tests/node-ts4.5-jest27/package-lock.json
index f46e12de9..76813597f 100644
--- a/ecosystem-tests/node-ts4.5-jest27/package-lock.json
+++ b/ecosystem-tests/node-ts4.5-jest27/package-lock.json
@@ -1068,10 +1068,13 @@
}
},
"node_modules/@types/node": {
- "version": "20.6.0",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.6.0.tgz",
- "integrity": "sha512-najjVq5KN2vsH2U/xyh2opaSEz6cZMR2SetLIlxlj08nOcmPOemJmUK2o4kUzfLqfrWE0PIrNeE16XhYDd3nqg==",
- "dev": true
+ "version": "20.11.20",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
+ "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
},
"node_modules/@types/node-fetch": {
"version": "2.6.5",
@@ -4117,6 +4120,12 @@
"node": ">=4.2.0"
}
},
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "dev": true
+ },
"node_modules/universalify": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz",
From 0372eaaf6f33bfbc3cb6294a2fd6b3bab9e7ba80 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 23 Feb 2024 19:18:24 -0500
Subject: [PATCH 007/533] feat(api): add wav and pcm to response_format (#691)
---
src/resources/audio/speech.ts | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts
index faa281686..d5ef09118 100644
--- a/src/resources/audio/speech.ts
+++ b/src/resources/audio/speech.ts
@@ -35,9 +35,13 @@ export interface SpeechCreateParams {
voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
/**
- * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`.
+ * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,
+ * `flac`, `pcm`, and `wav`.
+ *
+ * The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
+ * sample rate, mono channel, and 16-bit depth in signed little-endian format.
*/
- response_format?: 'mp3' | 'opus' | 'aac' | 'flac';
+ response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'pcm' | 'wav';
/**
* The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
From 5961cb8cea11065efd1ffee9db14f19ad7054ad5 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Sat, 24 Feb 2024 16:17:31 +0100
Subject: [PATCH 008/533] chore(internal): fix ecosystem tests (#693)
---
.../node-ts-cjs-auto/moduleResolution/node/type-tests.ts | 1 -
1 file changed, 1 deletion(-)
diff --git a/ecosystem-tests/node-ts-cjs-auto/moduleResolution/node/type-tests.ts b/ecosystem-tests/node-ts-cjs-auto/moduleResolution/node/type-tests.ts
index a3c4f383b..2621b2b47 100644
--- a/ecosystem-tests/node-ts-cjs-auto/moduleResolution/node/type-tests.ts
+++ b/ecosystem-tests/node-ts-cjs-auto/moduleResolution/node/type-tests.ts
@@ -9,6 +9,5 @@ async function typeTests() {
model: 'whisper-1',
})
.asResponse();
- // @ts-expect-error this doesn't work with "moduleResolution": "node"
response.body;
}
From 6175eca426b15990be5e5cdb0e8497e547f87d8a Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 28 Feb 2024 06:06:00 +0100
Subject: [PATCH 009/533] release: 4.28.4
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 19 +++++++++++++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 24 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 8d5375100..5934251e9 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.28.3"
+ ".": "4.28.4"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 274b8e8a5..68ebe3767 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog
+## 4.28.4 (2024-02-28)
+
+Full Changelog: [v4.28.3...v4.28.4](https://github.com/openai/openai-node/compare/v4.28.3...v4.28.4)
+
+### Features
+
+* **api:** add wav and pcm to response_format ([#691](https://github.com/openai/openai-node/issues/691)) ([b1c6171](https://github.com/openai/openai-node/commit/b1c61711961a62a4d7b47909a68ecd65231a66af))
+
+
+### Chores
+
+* **ci:** update actions/setup-node action to v4 ([#685](https://github.com/openai/openai-node/issues/685)) ([f2704d5](https://github.com/openai/openai-node/commit/f2704d5f1580c0f1d31584ef88702cde8f6804d4))
+* **internal:** fix ecosystem tests ([#693](https://github.com/openai/openai-node/issues/693)) ([616624d](https://github.com/openai/openai-node/commit/616624d3d9fd10ce254ce0d435b2b73ed11679f2))
+* **types:** extract run status to a named type ([#686](https://github.com/openai/openai-node/issues/686)) ([b3b3b8e](https://github.com/openai/openai-node/commit/b3b3b8ea20e0f311d3bd53dfd22ccc04f5dce5f7))
+* update @types/react to 18.2.58, @types/react-dom to 18.2.19 ([#688](https://github.com/openai/openai-node/issues/688)) ([2a0d0b1](https://github.com/openai/openai-node/commit/2a0d0b1cb197eef25e42bbba88ee90c37d623f24))
+* update dependency @types/node to v20.11.20 ([#690](https://github.com/openai/openai-node/issues/690)) ([4ca005b](https://github.com/openai/openai-node/commit/4ca005be082d6c50fe95da6148896b62080bfe07))
+* update dependency @types/ws to v8.5.10 ([#683](https://github.com/openai/openai-node/issues/683)) ([a617268](https://github.com/openai/openai-node/commit/a6172683a3390422984ad282ac4940781493e772))
+* update dependency next to v13.5.6 ([#689](https://github.com/openai/openai-node/issues/689)) ([abb3b66](https://github.com/openai/openai-node/commit/abb3b6674b8f9f8ff9c2cc61629a31883ae4d8c8))
+
## 4.28.3 (2024-02-20)
Full Changelog: [v4.28.2...v4.28.3](https://github.com/openai/openai-node/compare/v4.28.2...v4.28.3)
diff --git a/README.md b/README.md
index c7779e79a..ef174634e 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.28.3/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.28.4/mod.ts';
```
diff --git a/build-deno b/build-deno
index c8215c85d..74d994d08 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.28.3/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.28.4/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 455c0d180..65d6046f6 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.28.3",
+ "version": "4.28.4",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 3975f7a3e..9dd894067 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.28.3'; // x-release-please-version
+export const VERSION = '4.28.4'; // x-release-please-version
From 08c5974033dfdb3e60ad50305e2a9aafd586d3f2 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 29 Feb 2024 17:39:28 +0100
Subject: [PATCH 010/533] docs(contributing): improve wording (#696)
---
CONTRIBUTING.md | 6 +++---
README.md | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 61f37370f..693e9ea70 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,7 +3,7 @@
This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install/#mac-stable).
Other package managers may work but are not officially supported for development.
-To setup the repository, run:
+To set up the repository, run:
```bash
yarn
@@ -65,7 +65,7 @@ pnpm link -—global openai
## Running tests
-Most tests will require you to [setup a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
+Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
```bash
npx prism path/to/your/openapi.yml
@@ -99,7 +99,7 @@ the changes aren't made through the automated pipeline, you may want to make rel
### Publish with a GitHub workflow
-You can release to package managers by using [the `Publish NPM` GitHub action](https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml). This will require a setup organization or repository secret to be set up.
+You can release to package managers by using [the `Publish NPM` GitHub action](https://www.github.com/openai/openai-node/actions/workflows/publish-npm.yml). This requires a setup organization or repository secret to be set up.
### Publish manually
diff --git a/README.md b/README.md
index ef174634e..e8ff603e9 100644
--- a/README.md
+++ b/README.md
@@ -424,7 +424,7 @@ import OpenAI from 'openai';
```
To do the inverse, add `import "openai/shims/node"` (which does import polyfills).
-This can also be useful if you are getting the wrong TypeScript types for `Response` - more details [here](https://github.com/openai/openai-node/tree/master/src/_shims#readme).
+This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/openai/openai-node/tree/master/src/_shims#readme)).
You may also provide a custom `fetch` function when instantiating the client,
which can be used to inspect or alter the `Request` or `Response` before/after each request:
From c3fee07c78fef9115da353fab8f5e399f81cdc93 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 29 Feb 2024 21:56:48 +0100
Subject: [PATCH 011/533] docs(readme): fix typo in custom fetch implementation
(#698)
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index e8ff603e9..68d356f8f 100644
--- a/README.md
+++ b/README.md
@@ -434,7 +434,7 @@ import { fetch } from 'undici'; // as one example
import OpenAI from 'openai';
const client = new OpenAI({
- fetch: async (url: RequestInfo, init?: RequestInfo): Promise => {
+ fetch: async (url: RequestInfo, init?: RequestInit): Promise => {
console.log('About to make a request', url, init);
const response = await fetch(url, init);
console.log('Got response', response);
From 181a5dddb650f1b060b88cbe3bf7293ddfecebdf Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 1 Mar 2024 01:32:50 +0100
Subject: [PATCH 012/533] fix(ChatCompletionStream): abort on async iterator
break and handle errors (#699)
`break`-ing the async iterator did not previously abort the request which increases usage.
Errors are now handled more effectively in the async iterator.
---
src/lib/ChatCompletionRunFunctions.test.ts | 53 +++++++++++++++++++++-
src/lib/ChatCompletionStream.ts | 35 +++++++++++---
2 files changed, 81 insertions(+), 7 deletions(-)
diff --git a/src/lib/ChatCompletionRunFunctions.test.ts b/src/lib/ChatCompletionRunFunctions.test.ts
index bb360b217..b524218ae 100644
--- a/src/lib/ChatCompletionRunFunctions.test.ts
+++ b/src/lib/ChatCompletionRunFunctions.test.ts
@@ -1,5 +1,5 @@
import OpenAI from 'openai';
-import { OpenAIError } from 'openai/error';
+import { OpenAIError, APIConnectionError } from 'openai/error';
import { PassThrough } from 'stream';
import {
ParsingToolFunction,
@@ -2207,6 +2207,7 @@ describe('resource completions', () => {
await listener.sanityCheck();
});
});
+
describe('stream', () => {
test('successful flow', async () => {
const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
@@ -2273,5 +2274,55 @@ describe('resource completions', () => {
expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' });
await listener.sanityCheck();
});
+ test('handles network errors', async () => {
+ const { fetch, handleRequest } = mockFetch();
+
+ const openai = new OpenAI({ apiKey: '...', fetch });
+
+ const stream = openai.beta.chat.completions.stream(
+ {
+ max_tokens: 1024,
+ model: 'gpt-3.5-turbo',
+ messages: [{ role: 'user', content: 'Say hello there!' }],
+ },
+ { maxRetries: 0 },
+ );
+
+ handleRequest(async () => {
+ throw new Error('mock request error');
+ }).catch(() => {});
+
+ async function runStream() {
+ await stream.done();
+ }
+
+ await expect(runStream).rejects.toThrow(APIConnectionError);
+ });
+ test('handles network errors on async iterator', async () => {
+ const { fetch, handleRequest } = mockFetch();
+
+ const openai = new OpenAI({ apiKey: '...', fetch });
+
+ const stream = openai.beta.chat.completions.stream(
+ {
+ max_tokens: 1024,
+ model: 'gpt-3.5-turbo',
+ messages: [{ role: 'user', content: 'Say hello there!' }],
+ },
+ { maxRetries: 0 },
+ );
+
+ handleRequest(async () => {
+ throw new Error('mock request error');
+ }).catch(() => {});
+
+ async function runStream() {
+ for await (const _event of stream) {
+ continue;
+ }
+ }
+
+ await expect(runStream).rejects.toThrow(APIConnectionError);
+ });
});
});
diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts
index a2aa7032e..2ea040383 100644
--- a/src/lib/ChatCompletionStream.ts
+++ b/src/lib/ChatCompletionStream.ts
@@ -210,13 +210,16 @@ export class ChatCompletionStream
[Symbol.asyncIterator](): AsyncIterator {
const pushQueue: ChatCompletionChunk[] = [];
- const readQueue: ((chunk: ChatCompletionChunk | undefined) => void)[] = [];
+ const readQueue: {
+ resolve: (chunk: ChatCompletionChunk | undefined) => void;
+ reject: (err: unknown) => void;
+ }[] = [];
let done = false;
this.on('chunk', (chunk) => {
const reader = readQueue.shift();
if (reader) {
- reader(chunk);
+ reader.resolve(chunk);
} else {
pushQueue.push(chunk);
}
@@ -225,7 +228,23 @@ export class ChatCompletionStream
this.on('end', () => {
done = true;
for (const reader of readQueue) {
- reader(undefined);
+ reader.resolve(undefined);
+ }
+ readQueue.length = 0;
+ });
+
+ this.on('abort', (err) => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.reject(err);
+ }
+ readQueue.length = 0;
+ });
+
+ this.on('error', (err) => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.reject(err);
}
readQueue.length = 0;
});
@@ -236,13 +255,17 @@ export class ChatCompletionStream
if (done) {
return { value: undefined, done: true };
}
- return new Promise((resolve) => readQueue.push(resolve)).then(
- (chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }),
- );
+ return new Promise((resolve, reject) =>
+ readQueue.push({ resolve, reject }),
+ ).then((chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }));
}
const chunk = pushQueue.shift()!;
return { value: chunk, done: false };
},
+ return: async () => {
+ this.abort();
+ return { value: undefined, done: true };
+ },
};
}
From 18d9cb729d23871976368e8b5c40515661a8bd4b Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 1 Mar 2024 14:57:12 +0100
Subject: [PATCH 013/533] chore(docs): mention install from git repo (#700)
---
CONTRIBUTING.md | 2 ++
README.md | 1 +
2 files changed, 3 insertions(+)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 693e9ea70..297322d17 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -43,6 +43,8 @@ To install via git:
```bash
npm install --save git+ssh://git@github.com:openai/openai-node.git
+# or
+yarn add git+ssh://git@github.com:openai/openai-node.git
```
Alternatively, to link a local copy of the repo:
diff --git a/README.md b/README.md
index 68d356f8f..dd3ac15c0 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,7 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo
## Installation
```sh
+# install from NPM
npm install --save openai
# or
yarn add openai
From c21ef88b650b996dd0cf97f36294db464573b531 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 4 Mar 2024 19:17:09 +0100
Subject: [PATCH 014/533] chore(api): update docs (#703)
---
src/resources/audio/speech.ts | 9 +++------
src/resources/audio/transcriptions.ts | 18 ++++++++++++++----
src/resources/audio/translations.ts | 3 ++-
src/resources/beta/threads/runs/runs.ts | 4 ++--
src/resources/chat/completions.ts | 14 +++++++++-----
src/resources/images.ts | 9 ++++++---
src/resources/moderations.ts | 8 +++-----
7 files changed, 39 insertions(+), 26 deletions(-)
diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts
index d5ef09118..7d0ee2195 100644
--- a/src/resources/audio/speech.ts
+++ b/src/resources/audio/speech.ts
@@ -35,13 +35,10 @@ export interface SpeechCreateParams {
voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
/**
- * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,
- * `flac`, `pcm`, and `wav`.
- *
- * The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
- * sample rate, mono channel, and 16-bit depth in signed little-endian format.
+ * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
+ * `wav`, and `pcm`.
*/
- response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'pcm' | 'wav';
+ response_format?: 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';
/**
* The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts
index 7f381c5a3..ab2079ed6 100644
--- a/src/resources/audio/transcriptions.ts
+++ b/src/resources/audio/transcriptions.ts
@@ -14,7 +14,14 @@ export class Transcriptions extends APIResource {
}
}
+/**
+ * Represents a transcription response returned by model, based on the provided
+ * input.
+ */
export interface Transcription {
+ /**
+ * The transcribed text.
+ */
text: string;
}
@@ -26,7 +33,8 @@ export interface TranscriptionCreateParams {
file: Uploadable;
/**
- * ID of the model to use. Only `whisper-1` is currently available.
+ * ID of the model to use. Only `whisper-1` (which is powered by our open source
+ * Whisper V2 model) is currently available.
*/
model: (string & {}) | 'whisper-1';
@@ -61,9 +69,11 @@ export interface TranscriptionCreateParams {
temperature?: number;
/**
- * The timestamp granularities to populate for this transcription. Any of these
- * options: `word`, or `segment`. Note: There is no additional latency for segment
- * timestamps, but generating word timestamps incurs additional latency.
+ * The timestamp granularities to populate for this transcription.
+ * `response_format` must be set `verbose_json` to use timestamp granularities.
+ * Either or both of these options are supported: `word`, or `segment`. Note: There
+ * is no additional latency for segment timestamps, but generating word timestamps
+ * incurs additional latency.
*/
timestamp_granularities?: Array<'word' | 'segment'>;
}
diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts
index 54583ce1f..e68a714fb 100644
--- a/src/resources/audio/translations.ts
+++ b/src/resources/audio/translations.ts
@@ -26,7 +26,8 @@ export interface TranslationCreateParams {
file: Uploadable;
/**
- * ID of the model to use. Only `whisper-1` is currently available.
+ * ID of the model to use. Only `whisper-1` (which is powered by our open source
+ * Whisper V2 model) is currently available.
*/
model: (string & {}) | 'whisper-1';
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 9582a060b..9a0bc00dd 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -270,9 +270,9 @@ export namespace Run {
*/
export interface LastError {
/**
- * One of `server_error` or `rate_limit_exceeded`.
+ * One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
*/
- code: 'server_error' | 'rate_limit_exceeded';
+ code: 'server_error' | 'rate_limit_exceeded' | 'invalid_prompt';
/**
* A human-readable description of the error.
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 2a5216745..44627eb85 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -546,7 +546,9 @@ export interface ChatCompletionTokenLogprob {
bytes: Array | null;
/**
- * The log probability of this token.
+ * The log probability of this token, if it is within the top 20 most likely
+ * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
+ * unlikely.
*/
logprob: number;
@@ -574,7 +576,9 @@ export namespace ChatCompletionTokenLogprob {
bytes: Array | null;
/**
- * The log probability of this token.
+ * The log probability of this token, if it is within the top 20 most likely
+ * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
+ * unlikely.
*/
logprob: number;
}
@@ -827,9 +831,9 @@ export interface ChatCompletionCreateParamsBase {
tools?: Array;
/**
- * An integer between 0 and 5 specifying the number of most likely tokens to return
- * at each token position, each with an associated log probability. `logprobs` must
- * be set to `true` if this parameter is used.
+ * An integer between 0 and 20 specifying the number of most likely tokens to
+ * return at each token position, each with an associated log probability.
+ * `logprobs` must be set to `true` if this parameter is used.
*/
top_logprobs?: number | null;
diff --git a/src/resources/images.ts b/src/resources/images.ts
index 4bc654903..bc5b9edc0 100644
--- a/src/resources/images.ts
+++ b/src/resources/images.ts
@@ -80,7 +80,8 @@ export interface ImageCreateVariationParams {
/**
* The format in which the generated images are returned. Must be one of `url` or
- * `b64_json`.
+ * `b64_json`. URLs are only valid for 60 minutes after the image has been
+ * generated.
*/
response_format?: 'url' | 'b64_json' | null;
@@ -131,7 +132,8 @@ export interface ImageEditParams {
/**
* The format in which the generated images are returned. Must be one of `url` or
- * `b64_json`.
+ * `b64_json`. URLs are only valid for 60 minutes after the image has been
+ * generated.
*/
response_format?: 'url' | 'b64_json' | null;
@@ -176,7 +178,8 @@ export interface ImageGenerateParams {
/**
* The format in which the generated images are returned. Must be one of `url` or
- * `b64_json`.
+ * `b64_json`. URLs are only valid for 60 minutes after the image has been
+ * generated.
*/
response_format?: 'url' | 'b64_json' | null;
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
index 8bde6ecca..a43006ccf 100644
--- a/src/resources/moderations.ts
+++ b/src/resources/moderations.ts
@@ -6,7 +6,7 @@ import * as ModerationsAPI from 'openai/resources/moderations';
export class Moderations extends APIResource {
/**
- * Classifies if text violates OpenAI's Content Policy
+ * Classifies if text is potentially harmful.
*/
create(
body: ModerationCreateParams,
@@ -28,8 +28,7 @@ export interface Moderation {
category_scores: Moderation.CategoryScores;
/**
- * Whether the content violates
- * [OpenAI's usage policies](/policies/usage-policies).
+ * Whether any of the below categories are flagged.
*/
flagged: boolean;
}
@@ -170,8 +169,7 @@ export namespace Moderation {
}
/**
- * Represents policy compliance report by OpenAI's content moderation model against
- * a given input.
+ * Represents if a given text input is potentially harmful.
*/
export interface ModerationCreateResponse {
/**
From 34e128fad382d1aeac9912d85c50291c4882d298 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 4 Mar 2024 21:53:09 +0100
Subject: [PATCH 015/533] chore: fix error handler in readme (#704)
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index dd3ac15c0..1cfb2537a 100644
--- a/README.md
+++ b/README.md
@@ -275,7 +275,7 @@ a subclass of `APIError` will be thrown:
async function main() {
const job = await openai.fineTuning.jobs
.create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
- .catch((err) => {
+ .catch(async (err) => {
if (err instanceof OpenAI.APIError) {
console.log(err.status); // 400
console.log(err.name); // BadRequestError
From 588b30f6f5604387cb2ade716b6cf693e1175cec Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 5 Mar 2024 12:10:53 +0100
Subject: [PATCH 016/533] docs(readme): fix https proxy example (#705)
---
README.md | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 1cfb2537a..1207d5d24 100644
--- a/README.md
+++ b/README.md
@@ -456,7 +456,7 @@ If you would like to disable or customize this behavior, for example to use the
```ts
import http from 'http';
-import HttpsProxyAgent from 'https-proxy-agent';
+import { HttpsProxyAgent } from 'https-proxy-agent';
// Configure the default for all requests:
const openai = new OpenAI({
@@ -465,9 +465,8 @@ const openai = new OpenAI({
// Override per-request:
await openai.models.list({
- baseURL: 'http://localhost:8080/test-api',
httpAgent: new http.Agent({ keepAlive: false }),
-})
+});
```
## Semantic Versioning
From 753bced18a57cd4a7739e8e03a7b7933048be79f Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 6 Mar 2024 17:42:34 +0100
Subject: [PATCH 017/533] fix(streaming): correctly handle trailing new lines
in byte chunks (#708)
---
src/streaming.ts | 8 +++++++-
tests/streaming.test.ts | 42 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 49 insertions(+), 1 deletion(-)
create mode 100644 tests/streaming.test.ts
diff --git a/src/streaming.ts b/src/streaming.ts
index 7d8b4442a..1b59bce20 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -267,7 +267,7 @@ class SSEDecoder {
*
* https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258
*/
-class LineDecoder {
+export class LineDecoder {
// prettier-ignore
static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']);
static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g;
@@ -300,6 +300,12 @@ class LineDecoder {
const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || '');
let lines = text.split(LineDecoder.NEWLINE_REGEXP);
+ // if there is a trailing new line then the last entry will be an empty
+ // string which we don't care about
+ if (trailingNewline) {
+ lines.pop();
+ }
+
if (lines.length === 1 && !trailingNewline) {
this.buffer.push(lines[0]!);
return [];
diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts
new file mode 100644
index 000000000..45cf6f6cd
--- /dev/null
+++ b/tests/streaming.test.ts
@@ -0,0 +1,42 @@
+import { LineDecoder } from 'openai/streaming';
+
+function decodeChunks(chunks: string[], decoder?: LineDecoder): string[] {
+ if (!decoder) {
+ decoder = new LineDecoder();
+ }
+
+ const lines = [];
+ for (const chunk of chunks) {
+ lines.push(...decoder.decode(chunk));
+ }
+
+ return lines;
+}
+
+describe('line decoder', () => {
+ test('basic', () => {
+ // baz is not included because the line hasn't ended yet
+ expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']);
+ });
+
+ test('basic with \\r', () => {
+ // baz is not included because the line hasn't ended yet
+ expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']);
+ });
+
+ test('trailing new lines', () => {
+ expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']);
+ });
+
+ test('trailing new lines with \\r', () => {
+ expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']);
+ });
+
+ test('escaped new lines', () => {
+ expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']);
+ });
+
+ test('escaped new lines with \\r', () => {
+ expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']);
+ });
+});
From e0deb2285fb35fac8096ebfe6ed5f9dcd1a8b7f0 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 6 Mar 2024 19:13:10 +0100
Subject: [PATCH 018/533] chore(types): fix accidental exposure of Buffer type
to cloudflare (#709)
---
src/streaming.ts | 13 ++++++++++++-
tests/streaming.test.ts | 15 +--------------
2 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/src/streaming.ts b/src/streaming.ts
index 1b59bce20..7b0466a3c 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -267,7 +267,7 @@ class SSEDecoder {
*
* https://github.com/encode/httpx/blob/920333ea98118e9cf617f246905d7b202510941c/httpx/_decoders.py#L258
*/
-export class LineDecoder {
+class LineDecoder {
// prettier-ignore
static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']);
static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g;
@@ -372,6 +372,17 @@ export class LineDecoder {
}
}
+/** This is an internal helper function that's just used for testing */
+export function _decodeChunks(chunks: string[]): string[] {
+ const decoder = new LineDecoder();
+ const lines = [];
+ for (const chunk of chunks) {
+ lines.push(...decoder.decode(chunk));
+ }
+
+ return lines;
+}
+
function partition(str: string, delimiter: string): [string, string, string] {
const index = str.indexOf(delimiter);
if (index !== -1) {
diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts
index 45cf6f6cd..479b2a341 100644
--- a/tests/streaming.test.ts
+++ b/tests/streaming.test.ts
@@ -1,17 +1,4 @@
-import { LineDecoder } from 'openai/streaming';
-
-function decodeChunks(chunks: string[], decoder?: LineDecoder): string[] {
- if (!decoder) {
- decoder = new LineDecoder();
- }
-
- const lines = [];
- for (const chunk of chunks) {
- lines.push(...decoder.decode(chunk));
- }
-
- return lines;
-}
+import { _decodeChunks as decodeChunks } from 'openai/streaming';
describe('line decoder', () => {
test('basic', () => {
From 0825acf85cd50d02b63a875481aadd5ec6cc6aad Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 6 Mar 2024 21:12:48 +0100
Subject: [PATCH 019/533] docs: remove extraneous --save and yarn install
instructions (#710)
---
CONTRIBUTING.md | 4 +---
README.md | 5 +----
2 files changed, 2 insertions(+), 7 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 297322d17..d9e64025d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -42,9 +42,7 @@ If you’d like to use the repository from source, you can either install from g
To install via git:
```bash
-npm install --save git+ssh://git@github.com:openai/openai-node.git
-# or
-yarn add git+ssh://git@github.com:openai/openai-node.git
+npm install git+ssh://git@github.com:openai/openai-node.git
```
Alternatively, to link a local copy of the repo:
diff --git a/README.md b/README.md
index 1207d5d24..28262aaca 100644
--- a/README.md
+++ b/README.md
@@ -11,10 +11,7 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo
## Installation
```sh
-# install from NPM
-npm install --save openai
-# or
-yarn add openai
+npm install openai
```
You can import in Deno via:
From 50206a06974d558d9df7d8649cc2c71822e67472 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 7 Mar 2024 19:13:22 +0100
Subject: [PATCH 020/533] docs: use @deprecated decorator for deprecated params
(#711)
---
src/resources/chat/completions.ts | 30 ++++++++++++++++++------------
src/resources/files.ts | 8 ++++----
2 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 44627eb85..c2d6da0be 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -133,8 +133,8 @@ export interface ChatCompletionAssistantMessageParam {
content?: string | null;
/**
- * Deprecated and replaced by `tool_calls`. The name and arguments of a function
- * that should be called, as generated by the model.
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
*/
function_call?: ChatCompletionAssistantMessageParam.FunctionCall;
@@ -152,8 +152,8 @@ export interface ChatCompletionAssistantMessageParam {
export namespace ChatCompletionAssistantMessageParam {
/**
- * Deprecated and replaced by `tool_calls`. The name and arguments of a function
- * that should be called, as generated by the model.
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
*/
export interface FunctionCall {
/**
@@ -250,8 +250,8 @@ export namespace ChatCompletionChunk {
content?: string | null;
/**
- * Deprecated and replaced by `tool_calls`. The name and arguments of a function
- * that should be called, as generated by the model.
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
*/
function_call?: Delta.FunctionCall;
@@ -265,8 +265,8 @@ export namespace ChatCompletionChunk {
export namespace Delta {
/**
- * Deprecated and replaced by `tool_calls`. The name and arguments of a function
- * that should be called, as generated by the model.
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
*/
export interface FunctionCall {
/**
@@ -378,6 +378,9 @@ export interface ChatCompletionFunctionCallOption {
name: string;
}
+/**
+ * @deprecated
+ */
export interface ChatCompletionFunctionMessageParam {
/**
* The contents of the function message.
@@ -410,8 +413,8 @@ export interface ChatCompletionMessage {
role: 'assistant';
/**
- * Deprecated and replaced by `tool_calls`. The name and arguments of a function
- * that should be called, as generated by the model.
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
*/
function_call?: ChatCompletionMessage.FunctionCall;
@@ -423,8 +426,8 @@ export interface ChatCompletionMessage {
export namespace ChatCompletionMessage {
/**
- * Deprecated and replaced by `tool_calls`. The name and arguments of a function
- * that should be called, as generated by the model.
+ * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
+ * a function that should be called, as generated by the model.
*/
export interface FunctionCall {
/**
@@ -855,6 +858,9 @@ export interface ChatCompletionCreateParamsBase {
}
export namespace ChatCompletionCreateParams {
+ /**
+ * @deprecated
+ */
export interface Function {
/**
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
diff --git a/src/resources/files.ts b/src/resources/files.ts
index db8f3a66a..cda487a63 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -154,14 +154,14 @@ export interface FileObject {
purpose: 'fine-tune' | 'fine-tune-results' | 'assistants' | 'assistants_output';
/**
- * Deprecated. The current status of the file, which can be either `uploaded`,
- * `processed`, or `error`.
+ * @deprecated: Deprecated. The current status of the file, which can be either
+ * `uploaded`, `processed`, or `error`.
*/
status: 'uploaded' | 'processed' | 'error';
/**
- * Deprecated. For details on why a fine-tuning training file failed validation,
- * see the `error` field on `fine_tuning.job`.
+ * @deprecated: Deprecated. For details on why a fine-tuning training file failed
+ * validation, see the `error` field on `fine_tuning.job`.
*/
status_details?: string;
}
From c71ad7062dc778a3675b104650b21877e811956b Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 7 Mar 2024 21:10:51 +0100
Subject: [PATCH 021/533] chore(internal): add explicit type annotation to
decoder (#712)
---
src/streaming.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/streaming.ts b/src/streaming.ts
index 7b0466a3c..f90c5d89a 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -375,7 +375,7 @@ class LineDecoder {
/** This is an internal helper function that's just used for testing */
export function _decodeChunks(chunks: string[]): string[] {
const decoder = new LineDecoder();
- const lines = [];
+ const lines: string[] = [];
for (const chunk of chunks) {
lines.push(...decoder.decode(chunk));
}
From beea0c7c6b6b8611f3b95c02fb35e74855f7ba03 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 13 Mar 2024 01:06:20 -0400
Subject: [PATCH 022/533] release: 4.28.5
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 27 +++++++++++++++++++++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 32 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 5934251e9..2813cb972 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.28.4"
+ ".": "4.28.5"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68ebe3767..8798e4b66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,32 @@
# Changelog
+## 4.28.5 (2024-03-13)
+
+Full Changelog: [v4.28.4...v4.28.5](https://github.com/openai/openai-node/compare/v4.28.4...v4.28.5)
+
+### Bug Fixes
+
+* **ChatCompletionStream:** abort on async iterator break and handle errors ([#699](https://github.com/openai/openai-node/issues/699)) ([ac417a2](https://github.com/openai/openai-node/commit/ac417a2db31919d2b52f2eb2e38f9c67a8f73254))
+* **streaming:** correctly handle trailing new lines in byte chunks ([#708](https://github.com/openai/openai-node/issues/708)) ([4753be2](https://github.com/openai/openai-node/commit/4753be272b1d1dade7a769cf350b829fc639f36e))
+
+
+### Chores
+
+* **api:** update docs ([#703](https://github.com/openai/openai-node/issues/703)) ([e1db98b](https://github.com/openai/openai-node/commit/e1db98bef29d200e2e401e3f5d7b2db6839c7836))
+* **docs:** mention install from git repo ([#700](https://github.com/openai/openai-node/issues/700)) ([c081bdb](https://github.com/openai/openai-node/commit/c081bdbb55585e63370496d324dc6f94d86424d1))
+* fix error handler in readme ([#704](https://github.com/openai/openai-node/issues/704)) ([4ff790a](https://github.com/openai/openai-node/commit/4ff790a67cf876191e04ad0e369e447e080b78a7))
+* **internal:** add explicit type annotation to decoder ([#712](https://github.com/openai/openai-node/issues/712)) ([d728e99](https://github.com/openai/openai-node/commit/d728e9923554e4c72c9efa3bd528561400d50ad8))
+* **types:** fix accidental exposure of Buffer type to cloudflare ([#709](https://github.com/openai/openai-node/issues/709)) ([0323ecb](https://github.com/openai/openai-node/commit/0323ecb98ddbd8910fc5719c8bab5175b945d2ab))
+
+
+### Documentation
+
+* **contributing:** improve wording ([#696](https://github.com/openai/openai-node/issues/696)) ([940d569](https://github.com/openai/openai-node/commit/940d5695f4cacddbb58e3bfc50fec28c468c7e63))
+* **readme:** fix https proxy example ([#705](https://github.com/openai/openai-node/issues/705)) ([d144789](https://github.com/openai/openai-node/commit/d1447890a556d37928b628f6449bb80de224d207))
+* **readme:** fix typo in custom fetch implementation ([#698](https://github.com/openai/openai-node/issues/698)) ([64041fd](https://github.com/openai/openai-node/commit/64041fd33da569eccae64afe4e50ee803017b20b))
+* remove extraneous --save and yarn install instructions ([#710](https://github.com/openai/openai-node/issues/710)) ([8ec216d](https://github.com/openai/openai-node/commit/8ec216d6b72ee4d67e26786f06c93af18d042117))
+* use [@deprecated](https://github.com/deprecated) decorator for deprecated params ([#711](https://github.com/openai/openai-node/issues/711)) ([4688ef4](https://github.com/openai/openai-node/commit/4688ef4b36e9f383a3abf6cdb31d498163a7bb9e))
+
## 4.28.4 (2024-02-28)
Full Changelog: [v4.28.3...v4.28.4](https://github.com/openai/openai-node/compare/v4.28.3...v4.28.4)
diff --git a/README.md b/README.md
index 28262aaca..24d38ac79 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.28.4/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.28.5/mod.ts';
```
diff --git a/build-deno b/build-deno
index 74d994d08..fb739cc50 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.28.4/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.28.5/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 65d6046f6..d51c4ca96 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.28.4",
+ "version": "4.28.5",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 9dd894067..516e764d1 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.28.4'; // x-release-please-version
+export const VERSION = '4.28.5'; // x-release-please-version
From 7d27d286876d0a575d91a4752f401126fe93d2a3 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 13 Mar 2024 16:30:47 -0400
Subject: [PATCH 023/533] feat(assistants): add support for streaming (#714)
See the reference docs for more information:
https://platform.openai.com/docs/api-reference/assistants-streaming
We've also improved some of the names for the types in
the assistants beta, non exhaustive list:
- `CodeToolCall` -> `CodeInterpreterToolCall`
- `MessageContentImageFile` -> `ImageFileContentBlock`
- `MessageContentText` -> `TextContentBlock`
- `ThreadMessage` -> `Message`
- `ThreadMessageDeleted` -> `MessageDeleted`
---
api.md | 58 +-
examples/assistant-stream-raw.ts | 39 +
examples/assistant-stream.ts | 48 +
examples/assistants.ts | 57 ++
src/index.ts | 1 +
src/lib/AbstractAssistantStreamRunner.ts | 340 +++++++
src/lib/AssistantStream.ts | 698 +++++++++++++++
src/resources/beta/assistants/assistants.ts | 844 ++++++++++++++++--
src/resources/beta/assistants/index.ts | 9 +
src/resources/beta/beta.ts | 12 +
src/resources/beta/index.ts | 12 +
src/resources/beta/threads/index.ts | 35 +-
src/resources/beta/threads/messages/index.ts | 26 +-
.../beta/threads/messages/messages.ts | 426 +++++++--
src/resources/beta/threads/runs/index.ts | 19 +-
src/resources/beta/threads/runs/runs.ts | 281 ++++--
src/resources/beta/threads/runs/steps.ts | 259 +++++-
src/resources/beta/threads/threads.ts | 207 ++++-
src/resources/chat/completions.ts | 2 +-
src/resources/completions.ts | 2 +
src/resources/shared.ts | 10 +
src/streaming.ts | 14 +
.../beta/threads/runs/runs.test.ts | 2 +
.../beta/threads/threads.test.ts | 1 +
tests/streaming/assistants/assistant.test.ts | 32 +
25 files changed, 3155 insertions(+), 279 deletions(-)
create mode 100644 examples/assistant-stream-raw.ts
create mode 100644 examples/assistant-stream.ts
create mode 100644 examples/assistants.ts
create mode 100644 src/lib/AbstractAssistantStreamRunner.ts
create mode 100644 src/lib/AssistantStream.ts
create mode 100644 tests/streaming/assistants/assistant.test.ts
diff --git a/api.md b/api.md
index ff3180cba..504a103c7 100644
--- a/api.md
+++ b/api.md
@@ -2,6 +2,7 @@
Types:
+- ErrorObject
- FunctionDefinition
- FunctionParameters
@@ -177,6 +178,15 @@ Types:
- Assistant
- AssistantDeleted
+- AssistantStreamEvent
+- AssistantTool
+- CodeInterpreterTool
+- FunctionTool
+- MessageStreamEvent
+- RetrievalTool
+- RunStepStreamEvent
+- RunStreamEvent
+- ThreadStreamEvent
Methods:
@@ -214,6 +224,7 @@ Methods:
- client.beta.threads.update(threadId, { ...params }) -> Thread
- client.beta.threads.del(threadId) -> ThreadDeleted
- client.beta.threads.createAndRun({ ...params }) -> Run
+- client.beta.threads.createAndRunStream(body, options?) -> AssistantStream
### Runs
@@ -231,16 +242,29 @@ Methods:
- client.beta.threads.runs.list(threadId, { ...params }) -> RunsPage
- client.beta.threads.runs.cancel(threadId, runId) -> Run
- client.beta.threads.runs.submitToolOutputs(threadId, runId, { ...params }) -> Run
+- client.beta.threads.runs.createAndStream(threadId, body, options?) -> AssistantStream
+- client.beta.threads.runs.submitToolOutputsStream(threadId, runId, body, options?) -> AssistantStream
#### Steps
Types:
-- CodeToolCall
+- CodeInterpreterLogs
+- CodeInterpreterOutputImage
+- CodeInterpreterToolCall
+- CodeInterpreterToolCallDelta
- FunctionToolCall
+- FunctionToolCallDelta
- MessageCreationStepDetails
- RetrievalToolCall
+- RetrievalToolCallDelta
- RunStep
+- RunStepDelta
+- RunStepDeltaEvent
+- RunStepDeltaMessageDelta
+- ToolCall
+- ToolCallDelta
+- ToolCallDeltaObject
- ToolCallsStepDetails
Methods:
@@ -252,17 +276,33 @@ Methods:
Types:
-- MessageContentImageFile
-- MessageContentText
-- ThreadMessage
-- ThreadMessageDeleted
+- Annotation
+- AnnotationDelta
+- FileCitationAnnotation
+- FileCitationDeltaAnnotation
+- FilePathAnnotation
+- FilePathDeltaAnnotation
+- ImageFile
+- ImageFileContentBlock
+- ImageFileDelta
+- ImageFileDeltaBlock
+- Message
+- MessageContent
+- MessageContentDelta
+- MessageDeleted
+- MessageDelta
+- MessageDeltaEvent
+- Text
+- TextContentBlock
+- TextDelta
+- TextDeltaBlock
Methods:
-- client.beta.threads.messages.create(threadId, { ...params }) -> ThreadMessage
-- client.beta.threads.messages.retrieve(threadId, messageId) -> ThreadMessage
-- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> ThreadMessage
-- client.beta.threads.messages.list(threadId, { ...params }) -> ThreadMessagesPage
+- client.beta.threads.messages.create(threadId, { ...params }) -> Message
+- client.beta.threads.messages.retrieve(threadId, messageId) -> Message
+- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message
+- client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage
#### Files
diff --git a/examples/assistant-stream-raw.ts b/examples/assistant-stream-raw.ts
new file mode 100644
index 000000000..a882d219a
--- /dev/null
+++ b/examples/assistant-stream-raw.ts
@@ -0,0 +1,39 @@
+import OpenAI from 'openai';
+
+const openai = new OpenAI();
+
+async function main() {
+ const assistant = await openai.beta.assistants.create({
+ model: 'gpt-4-1106-preview',
+ name: 'Math Tutor',
+ instructions: 'You are a personal math tutor. Write and run code to answer math questions.',
+ });
+
+ const thread = await openai.beta.threads.create({
+ messages: [
+ {
+ role: 'user',
+ content: '"I need to solve the equation `3x + 11 = 14`. Can you help me?"',
+ },
+ ],
+ });
+
+ const stream = await openai.beta.threads.runs.create(thread.id, {
+ assistant_id: assistant.id,
+ additional_instructions: 'Please address the user as Jane Doe. The user has a premium account.',
+ stream: true,
+ });
+
+ for await (const event of stream) {
+ if (event.event === 'thread.message.delta') {
+ const chunk = event.data.delta.content?.[0];
+ if (chunk && 'text' in chunk) {
+ process.stdout.write(chunk.text.value);
+ }
+ }
+ }
+
+ console.log();
+}
+
+main();
diff --git a/examples/assistant-stream.ts b/examples/assistant-stream.ts
new file mode 100644
index 000000000..36c4ed152
--- /dev/null
+++ b/examples/assistant-stream.ts
@@ -0,0 +1,48 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import OpenAI from 'openai';
+
+/**
+ * Example of streaming a response from an assistant
+ */
+
+const openai = new OpenAI();
+
+async function main() {
+ const assistant = await openai.beta.assistants.create({
+ model: 'gpt-4-1106-preview',
+ name: 'Math Tutor',
+ instructions: 'You are a personal math tutor. Write and run code to answer math questions.',
+ });
+
+ let assistantId = assistant.id;
+ console.log('Created Assistant with Id: ' + assistantId);
+
+ const thread = await openai.beta.threads.create({
+ messages: [
+ {
+ role: 'user',
+ content: '"I need to solve the equation `3x + 11 = 14`. Can you help me?"',
+ },
+ ],
+ });
+
+ let threadId = thread.id;
+ console.log('Created thread with Id: ' + threadId);
+
+ const run = openai.beta.threads.runs
+ .createAndStream(threadId, {
+ assistant_id: assistantId,
+ })
+ //Subscribe to streaming events and log them
+ .on('event', (event) => console.log(event))
+ .on('textDelta', (delta, snapshot) => console.log(snapshot))
+ .on('messageDelta', (delta, snapshot) => console.log(snapshot))
+ .on('run', (run) => console.log(run))
+ .on('messageDelta', (delta, snapshot) => console.log(snapshot))
+ .on('connect', () => console.log());
+ const result = await run.finalRun();
+ console.log('Run Result' + result);
+}
+
+main();
diff --git a/examples/assistants.ts b/examples/assistants.ts
new file mode 100644
index 000000000..bbc2f80ce
--- /dev/null
+++ b/examples/assistants.ts
@@ -0,0 +1,57 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import OpenAI from 'openai';
+import { sleep } from 'openai/core';
+
+/**
+ * Example of polling for a complete response from an assistant
+ */
+
+const openai = new OpenAI();
+
+async function main() {
+ const assistant = await openai.beta.assistants.create({
+ model: 'gpt-4-1106-preview',
+ name: 'Math Tutor',
+ instructions: 'You are a personal math tutor. Write and run code to answer math questions.',
+ // tools = [],
+ });
+
+ let assistantId = assistant.id;
+ console.log('Created Assistant with Id: ' + assistantId);
+
+ const thread = await openai.beta.threads.create({
+ messages: [
+ {
+ role: 'user',
+ content: '"I need to solve the equation `3x + 11 = 14`. Can you help me?"',
+ },
+ ],
+ });
+
+ let threadId = thread.id;
+ console.log('Created thread with Id: ' + threadId);
+
+ const run = await openai.beta.threads.runs.create(thread.id, {
+ assistant_id: assistantId,
+ additional_instructions: 'Please address the user as Jane Doe. The user has a premium account.',
+ });
+
+ console.log('Created run with Id: ' + run.id);
+
+ while (true) {
+ const result = await openai.beta.threads.runs.retrieve(thread.id, run.id);
+ if (result.status == 'completed') {
+ const messages = await openai.beta.threads.messages.list(thread.id);
+ for (const message of messages.getPaginatedItems()) {
+ console.log(message);
+ }
+ break;
+ } else {
+ console.log('Waiting for completion. Current status: ' + result.status);
+ await sleep(5000);
+ }
+ }
+}
+
+main();
diff --git a/src/index.ts b/src/index.ts
index 80bf95b0d..7b3033fa9 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -285,6 +285,7 @@ export namespace OpenAI {
export import Beta = API.Beta;
+ export import ErrorObject = API.ErrorObject;
export import FunctionDefinition = API.FunctionDefinition;
export import FunctionParameters = API.FunctionParameters;
}
diff --git a/src/lib/AbstractAssistantStreamRunner.ts b/src/lib/AbstractAssistantStreamRunner.ts
new file mode 100644
index 000000000..b600f0df3
--- /dev/null
+++ b/src/lib/AbstractAssistantStreamRunner.ts
@@ -0,0 +1,340 @@
+import * as Core from 'openai/core';
+import { APIUserAbortError, OpenAIError } from 'openai/error';
+import { Run, RunSubmitToolOutputsParamsBase } from 'openai/resources/beta/threads/runs/runs';
+import { RunCreateParamsBase, Runs } from 'openai/resources/beta/threads/runs/runs';
+import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads';
+
+export abstract class AbstractAssistantStreamRunner<
+ Events extends CustomEvents = AbstractAssistantRunnerEvents,
+> {
+ controller: AbortController = new AbortController();
+
+ #connectedPromise: Promise;
+ #resolveConnectedPromise: () => void = () => {};
+ #rejectConnectedPromise: (error: OpenAIError) => void = () => {};
+
+ #endPromise: Promise;
+ #resolveEndPromise: () => void = () => {};
+ #rejectEndPromise: (error: OpenAIError) => void = () => {};
+
+ #listeners: { [Event in keyof Events]?: ListenersForEvent } = {};
+
+ #ended = false;
+ #errored = false;
+ #aborted = false;
+ #catchingPromiseCreated = false;
+
+ constructor() {
+ this.#connectedPromise = new Promise((resolve, reject) => {
+ this.#resolveConnectedPromise = resolve;
+ this.#rejectConnectedPromise = reject;
+ });
+
+ this.#endPromise = new Promise((resolve, reject) => {
+ this.#resolveEndPromise = resolve;
+ this.#rejectEndPromise = reject;
+ });
+
+ // Don't let these promises cause unhandled rejection errors.
+ // we will manually cause an unhandled rejection error later
+ // if the user hasn't registered any error listener or called
+ // any promise-returning method.
+ this.#connectedPromise.catch(() => {});
+ this.#endPromise.catch(() => {});
+ }
+
+ protected _run(executor: () => Promise) {
+ // Unfortunately if we call `executor()` immediately we get runtime errors about
+ // references to `this` before the `super()` constructor call returns.
+ setTimeout(() => {
+ executor().then(() => {
+ // this._emitFinal();
+ this._emit('end');
+ }, this.#handleError);
+ }, 0);
+ }
+
+ protected _addRun(run: Run): Run {
+ return run;
+ }
+
+ protected _connected() {
+ if (this.ended) return;
+ this.#resolveConnectedPromise();
+ this._emit('connect');
+ }
+
+ get ended(): boolean {
+ return this.#ended;
+ }
+
+ get errored(): boolean {
+ return this.#errored;
+ }
+
+ get aborted(): boolean {
+ return this.#aborted;
+ }
+
+ abort() {
+ this.controller.abort();
+ }
+
+ /**
+ * Adds the listener function to the end of the listeners array for the event.
+ * No checks are made to see if the listener has already been added. Multiple calls passing
+ * the same combination of event and listener will result in the listener being added, and
+ * called, multiple times.
+ * @returns this ChatCompletionStream, so that calls can be chained
+ */
+ on(event: Event, listener: ListenerForEvent): this {
+ const listeners: ListenersForEvent =
+ this.#listeners[event] || (this.#listeners[event] = []);
+ listeners.push({ listener });
+ return this;
+ }
+
+ /**
+ * Removes the specified listener from the listener array for the event.
+ * off() will remove, at most, one instance of a listener from the listener array. If any single
+ * listener has been added multiple times to the listener array for the specified event, then
+ * off() must be called multiple times to remove each instance.
+ * @returns this ChatCompletionStream, so that calls can be chained
+ */
+ off(event: Event, listener: ListenerForEvent): this {
+ const listeners = this.#listeners[event];
+ if (!listeners) return this;
+ const index = listeners.findIndex((l) => l.listener === listener);
+ if (index >= 0) listeners.splice(index, 1);
+ return this;
+ }
+
+ /**
+ * Adds a one-time listener function for the event. The next time the event is triggered,
+ * this listener is removed and then invoked.
+ * @returns this ChatCompletionStream, so that calls can be chained
+ */
+ once(event: Event, listener: ListenerForEvent): this {
+ const listeners: ListenersForEvent =
+ this.#listeners[event] || (this.#listeners[event] = []);
+ listeners.push({ listener, once: true });
+ return this;
+ }
+
+ /**
+ * This is similar to `.once()`, but returns a Promise that resolves the next time
+ * the event is triggered, instead of calling a listener callback.
+ * @returns a Promise that resolves the next time given event is triggered,
+ * or rejects if an error is emitted. (If you request the 'error' event,
+ * returns a promise that resolves with the error).
+ *
+ * Example:
+ *
+ * const message = await stream.emitted('message') // rejects if the stream errors
+ */
+ emitted(
+ event: Event,
+ ): Promise<
+ EventParameters extends [infer Param] ? Param
+ : EventParameters extends [] ? void
+ : EventParameters
+ > {
+ return new Promise((resolve, reject) => {
+ this.#catchingPromiseCreated = true;
+ if (event !== 'error') this.once('error', reject);
+ this.once(event, resolve as any);
+ });
+ }
+
+ async done(): Promise {
+ this.#catchingPromiseCreated = true;
+ await this.#endPromise;
+ }
+
+ #handleError = (error: unknown) => {
+ this.#errored = true;
+ if (error instanceof Error && error.name === 'AbortError') {
+ error = new APIUserAbortError();
+ }
+ if (error instanceof APIUserAbortError) {
+ this.#aborted = true;
+ return this._emit('abort', error);
+ }
+ if (error instanceof OpenAIError) {
+ return this._emit('error', error);
+ }
+ if (error instanceof Error) {
+ const openAIError: OpenAIError = new OpenAIError(error.message);
+ // @ts-ignore
+ openAIError.cause = error;
+ return this._emit('error', openAIError);
+ }
+ return this._emit('error', new OpenAIError(String(error)));
+ };
+
+ protected _emit(event: Event, ...args: EventParameters) {
+ // make sure we don't emit any events after end
+ if (this.#ended) {
+ return;
+ }
+
+ if (event === 'end') {
+ this.#ended = true;
+ this.#resolveEndPromise();
+ }
+
+ const listeners: ListenersForEvent | undefined = this.#listeners[event];
+ if (listeners) {
+ this.#listeners[event] = listeners.filter((l) => !l.once) as any;
+ listeners.forEach(({ listener }: any) => listener(...args));
+ }
+
+ if (event === 'abort') {
+ const error = args[0] as APIUserAbortError;
+ if (!this.#catchingPromiseCreated && !listeners?.length) {
+ Promise.reject(error);
+ }
+ this.#rejectConnectedPromise(error);
+ this.#rejectEndPromise(error);
+ this._emit('end');
+ return;
+ }
+
+ if (event === 'error') {
+ // NOTE: _emit('error', error) should only be called from #handleError().
+
+ const error = args[0] as OpenAIError;
+ if (!this.#catchingPromiseCreated && !listeners?.length) {
+ // Trigger an unhandled rejection if the user hasn't registered any error handlers.
+ // If you are seeing stack traces here, make sure to handle errors via either:
+ // - runner.on('error', () => ...)
+ // - await runner.done()
+ // - await runner.finalChatCompletion()
+ // - etc.
+ Promise.reject(error);
+ }
+ this.#rejectConnectedPromise(error);
+ this.#rejectEndPromise(error);
+ this._emit('end');
+ }
+ }
+
+ protected async _threadAssistantStream(
+ body: ThreadCreateAndRunParamsBase,
+ thread: Threads,
+ options?: Core.RequestOptions,
+ ): Promise {
+ return await this._createThreadAssistantStream(thread, body, options);
+ }
+
+ protected async _runAssistantStream(
+ threadId: string,
+ runs: Runs,
+ params: RunCreateParamsBase,
+ options?: Core.RequestOptions,
+ ): Promise {
+ return await this._createAssistantStream(runs, threadId, params, options);
+ }
+
+ protected async _runToolAssistantStream(
+ threadId: string,
+ runId: string,
+ runs: Runs,
+ params: RunSubmitToolOutputsParamsBase,
+ options?: Core.RequestOptions,
+ ): Promise {
+ return await this._createToolAssistantStream(runs, threadId, runId, params, options);
+ }
+
+ protected async _createThreadAssistantStream(
+ thread: Threads,
+ body: ThreadCreateAndRunParamsBase,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+ // this.#validateParams(params);
+
+ const runResult = await thread.createAndRun(
+ { ...body, stream: false },
+ { ...options, signal: this.controller.signal },
+ );
+ this._connected();
+ return this._addRun(runResult as Run);
+ }
+
+ protected async _createToolAssistantStream(
+ run: Runs,
+ threadId: string,
+ runId: string,
+ params: RunSubmitToolOutputsParamsBase,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+
+ const runResult = await run.submitToolOutputs(
+ threadId,
+ runId,
+ { ...params, stream: false },
+ { ...options, signal: this.controller.signal },
+ );
+ this._connected();
+ return this._addRun(runResult as Run);
+ }
+
+ protected async _createAssistantStream(
+ run: Runs,
+ threadId: string,
+ params: RunCreateParamsBase,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+ // this.#validateParams(params);
+
+ const runResult = await run.create(
+ threadId,
+ { ...params, stream: false },
+ { ...options, signal: this.controller.signal },
+ );
+ this._connected();
+ return this._addRun(runResult as Run);
+ }
+}
+
+type CustomEvents = {
+ [k in Event]: k extends keyof AbstractAssistantRunnerEvents ? AbstractAssistantRunnerEvents[k]
+ : (...args: any[]) => void;
+};
+
+type ListenerForEvent, Event extends keyof Events> = Event extends (
+ keyof AbstractAssistantRunnerEvents
+) ?
+ AbstractAssistantRunnerEvents[Event]
+: Events[Event];
+
+type ListenersForEvent, Event extends keyof Events> = Array<{
+ listener: ListenerForEvent;
+ once?: boolean;
+}>;
+type EventParameters, Event extends keyof Events> = Parameters<
+ ListenerForEvent
+>;
+
+export interface AbstractAssistantRunnerEvents {
+ connect: () => void;
+ run: (run: Run) => void;
+ error: (error: OpenAIError) => void;
+ abort: (error: APIUserAbortError) => void;
+ end: () => void;
+}
diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts
new file mode 100644
index 000000000..d70cb7358
--- /dev/null
+++ b/src/lib/AssistantStream.ts
@@ -0,0 +1,698 @@
+import {
+ TextContentBlock,
+ ImageFileContentBlock,
+ Message,
+ MessageContentDelta,
+ Text,
+ ImageFile,
+ TextDelta,
+ Messages,
+} from 'openai/resources/beta/threads/messages/messages';
+import * as Core from 'openai/core';
+import { RequestOptions } from 'openai/core';
+import {
+ Run,
+ RunCreateParamsBase,
+ RunCreateParamsStreaming,
+ Runs,
+ RunSubmitToolOutputsParamsBase,
+ RunSubmitToolOutputsParamsStreaming,
+} from 'openai/resources/beta/threads/runs/runs';
+import {
+ AbstractAssistantRunnerEvents,
+ AbstractAssistantStreamRunner,
+} from './AbstractAssistantStreamRunner';
+import { type ReadableStream } from 'openai/_shims/index';
+import { Stream } from 'openai/streaming';
+import { APIUserAbortError, OpenAIError } from 'openai/error';
+import {
+ AssistantStreamEvent,
+ MessageStreamEvent,
+ RunStepStreamEvent,
+ RunStreamEvent,
+} from 'openai/resources/beta/assistants/assistants';
+import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps';
+import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads';
+import MessageDelta = Messages.MessageDelta;
+
+export interface AssistantStreamEvents extends AbstractAssistantRunnerEvents {
+ //New event structure
+ messageCreated: (message: Message) => void;
+ messageDelta: (message: MessageDelta, snapshot: Message) => void;
+ messageDone: (message: Message) => void;
+
+ runStepCreated: (runStep: RunStep) => void;
+ runStepDelta: (delta: RunStepDelta, snapshot: Runs.RunStep) => void;
+ runStepDone: (runStep: Runs.RunStep, snapshot: Runs.RunStep) => void;
+
+ toolCallCreated: (toolCall: ToolCall) => void;
+ toolCallDelta: (delta: ToolCallDelta, snapshot: ToolCall) => void;
+ toolCallDone: (toolCall: ToolCall) => void;
+
+ textCreated: (content: Text) => void;
+ textDelta: (delta: TextDelta, snapshot: Text) => void;
+ textDone: (content: Text, snapshot: Message) => void;
+
+ //No created or delta as this is not streamed
+ imageFileDone: (content: ImageFile, snapshot: Message) => void;
+
+ end: () => void;
+
+ event: (event: AssistantStreamEvent) => void;
+}
+
+export type ThreadCreateAndRunParamsBaseStream = Omit & {
+ stream?: true;
+};
+
+export type RunCreateParamsBaseStream = Omit & {
+ stream?: true;
+};
+
+export type RunSubmitToolOutputsParamsStream = Omit & {
+ stream?: true;
+};
+
+export class AssistantStream
+ extends AbstractAssistantStreamRunner
+ implements AsyncIterable
+{
+ //Track all events in a single list for reference
+ #events: AssistantStreamEvent[] = [];
+
+ //Used to accumulate deltas
+ //We are accumulating many types so the value here is not strict
+ #runStepSnapshots: { [id: string]: Runs.RunStep } = {};
+ #messageSnapshots: { [id: string]: Message } = {};
+ #messageSnapshot: Message | undefined;
+ #finalRun: Run | undefined;
+ #currentContentIndex: number | undefined;
+ #currentContent: TextContentBlock | ImageFileContentBlock | undefined;
+ #currentToolCallIndex: number | undefined;
+ #currentToolCall: ToolCall | undefined;
+
+ //For current snapshot methods
+ #currentEvent: AssistantStreamEvent | undefined;
+ #currentRunSnapshot: Run | undefined;
+ #currentRunStepSnapshot: Runs.RunStep | undefined;
+
+ [Symbol.asyncIterator](): AsyncIterator {
+ const pushQueue: AssistantStreamEvent[] = [];
+ const readQueue: {
+ resolve: (chunk: AssistantStreamEvent | undefined) => void;
+ reject: (err: unknown) => void;
+ }[] = [];
+ let done = false;
+
+ //Catch all for passing along all events
+ this.on('event', (event) => {
+ const reader = readQueue.shift();
+ if (reader) {
+ reader.resolve(event);
+ } else {
+ pushQueue.push(event);
+ }
+ });
+
+ this.on('end', () => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.resolve(undefined);
+ }
+ readQueue.length = 0;
+ });
+
+ this.on('abort', (err) => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.reject(err);
+ }
+ readQueue.length = 0;
+ });
+
+ this.on('error', (err) => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.reject(err);
+ }
+ readQueue.length = 0;
+ });
+
+ return {
+ next: async (): Promise> => {
+ if (!pushQueue.length) {
+ if (done) {
+ return { value: undefined, done: true };
+ }
+ return new Promise((resolve, reject) =>
+ readQueue.push({ resolve, reject }),
+ ).then((chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }));
+ }
+ const chunk = pushQueue.shift()!;
+ return { value: chunk, done: false };
+ },
+ return: async () => {
+ this.abort();
+ return { value: undefined, done: true };
+ },
+ };
+ }
+
+ toReadableStream(): ReadableStream {
+ const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller);
+ return stream.toReadableStream();
+ }
+
+ static createToolAssistantStream(
+ threadId: string,
+ runId: string,
+ runs: Runs,
+ body: RunSubmitToolOutputsParamsStream,
+ options: RequestOptions | undefined,
+ ) {
+ const runner = new AssistantStream();
+ runner._run(() =>
+ runner._runToolAssistantStream(threadId, runId, runs, body, {
+ ...options,
+ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },
+ }),
+ );
+ return runner;
+ }
+
+ protected override async _createToolAssistantStream(
+ run: Runs,
+ threadId: string,
+ runId: string,
+ params: RunSubmitToolOutputsParamsStream,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+
+ const body: RunSubmitToolOutputsParamsStreaming = { ...params, stream: true };
+ const stream = await run.submitToolOutputs(threadId, runId, body, {
+ ...options,
+ signal: this.controller.signal,
+ });
+
+ this._connected();
+
+ for await (const event of stream) {
+ this.#addEvent(event);
+ }
+ if (stream.controller.signal?.aborted) {
+ throw new APIUserAbortError();
+ }
+
+ return this._addRun(this.#endRequest());
+ }
+
+ static createThreadAssistantStream(
+ body: ThreadCreateAndRunParamsBaseStream,
+ thread: Threads,
+ options?: RequestOptions,
+ ) {
+ const runner = new AssistantStream();
+ runner._run(() =>
+ runner._threadAssistantStream(body, thread, {
+ ...options,
+ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },
+ }),
+ );
+ return runner;
+ }
+
+ static createAssistantStream(
+ threadId: string,
+ runs: Runs,
+ params: RunCreateParamsBaseStream,
+ options?: RequestOptions,
+ ) {
+ const runner = new AssistantStream();
+ runner._run(() =>
+ runner._runAssistantStream(threadId, runs, params, {
+ ...options,
+ headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },
+ }),
+ );
+ return runner;
+ }
+
+ currentEvent(): AssistantStreamEvent | undefined {
+ return this.#currentEvent;
+ }
+
+ currentRun(): Run | undefined {
+ return this.#currentRunSnapshot;
+ }
+
+ currentMessageSnapshot(): Message | undefined {
+ return this.#messageSnapshot;
+ }
+
+ currentRunStepSnapshot(): Runs.RunStep | undefined {
+ return this.#currentRunStepSnapshot;
+ }
+
+ async finalRunSteps(): Promise {
+ await this.done();
+
+ return Object.values(this.#runStepSnapshots);
+ }
+
+ async finalMessages(): Promise {
+ await this.done();
+
+ return Object.values(this.#messageSnapshots);
+ }
+
+ async finalRun(): Promise {
+ await this.done();
+ if (!this.#finalRun) throw Error('Final run was not received.');
+
+ return this.#finalRun;
+ }
+
+ protected override async _createThreadAssistantStream(
+ thread: Threads,
+ params: ThreadCreateAndRunParamsBase,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+
+ const body: RunCreateParamsStreaming = { ...params, stream: true };
+ const stream = await thread.createAndRun(body, { ...options, signal: this.controller.signal });
+
+ this._connected();
+
+ for await (const event of stream) {
+ this.#addEvent(event);
+ }
+ if (stream.controller.signal?.aborted) {
+ throw new APIUserAbortError();
+ }
+
+ return this._addRun(this.#endRequest());
+ }
+
+ protected override async _createAssistantStream(
+ run: Runs,
+ threadId: string,
+ params: RunCreateParamsBase,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+
+ const body: RunCreateParamsStreaming = { ...params, stream: true };
+ const stream = await run.create(threadId, body, { ...options, signal: this.controller.signal });
+
+ this._connected();
+
+ for await (const event of stream) {
+ this.#addEvent(event);
+ }
+ if (stream.controller.signal?.aborted) {
+ throw new APIUserAbortError();
+ }
+
+ return this._addRun(this.#endRequest());
+ }
+
+ #addEvent(event: AssistantStreamEvent) {
+ if (this.ended) return;
+
+ this.#currentEvent = event;
+
+ this.#handleEvent(event);
+
+ switch (event.event) {
+ case 'thread.created':
+ //No action on this event.
+ break;
+
+ case 'thread.run.created':
+ case 'thread.run.queued':
+ case 'thread.run.in_progress':
+ case 'thread.run.requires_action':
+ case 'thread.run.completed':
+ case 'thread.run.failed':
+ case 'thread.run.cancelling':
+ case 'thread.run.cancelled':
+ case 'thread.run.expired':
+ this.#handleRun(event);
+ break;
+
+ case 'thread.run.step.created':
+ case 'thread.run.step.in_progress':
+ case 'thread.run.step.delta':
+ case 'thread.run.step.completed':
+ case 'thread.run.step.failed':
+ case 'thread.run.step.cancelled':
+ case 'thread.run.step.expired':
+ this.#handleRunStep(event);
+ break;
+
+ case 'thread.message.created':
+ case 'thread.message.in_progress':
+ case 'thread.message.delta':
+ case 'thread.message.completed':
+ case 'thread.message.incomplete':
+ this.#handleMessage(event);
+ break;
+
+ case 'error':
+ //This is included for completeness, but errors are processed in the SSE event processing so this should not occur
+ throw new Error(
+ 'Encountered an error event in event processing - errors should be processed earlier',
+ );
+ }
+ }
+
+ #endRequest(): Run {
+ if (this.ended) {
+ throw new OpenAIError(`stream has ended, this shouldn't happen`);
+ }
+
+ if (!this.#finalRun) throw Error('Final run has not been received');
+
+ return this.#finalRun;
+ }
+
+ #handleMessage(event: MessageStreamEvent) {
+ const [accumulatedMessage, newContent] = this.#accumulateMessage(event, this.#messageSnapshot);
+ this.#messageSnapshot = accumulatedMessage;
+ this.#messageSnapshots[accumulatedMessage.id] = accumulatedMessage;
+
+ for (const content of newContent) {
+ const snapshotContent = accumulatedMessage.content[content.index];
+ if (snapshotContent?.type == 'text') {
+ this._emit('textCreated', snapshotContent.text);
+ }
+ }
+
+ switch (event.event) {
+ case 'thread.message.created':
+ this._emit('messageCreated', event.data);
+ break;
+
+ case 'thread.message.in_progress':
+ break;
+
+ case 'thread.message.delta':
+ this._emit('messageDelta', event.data.delta, accumulatedMessage);
+
+ if (event.data.delta.content) {
+ for (const content of event.data.delta.content) {
+ //If it is text delta, emit a text delta event
+ if (content.type == 'text' && content.text) {
+ let textDelta = content.text;
+ let snapshot = accumulatedMessage.content[content.index];
+ if (snapshot && snapshot.type == 'text') {
+ this._emit('textDelta', textDelta, snapshot.text);
+ } else {
+ throw Error('The snapshot associated with this text delta is not text or missing');
+ }
+ }
+
+ if (content.index != this.#currentContentIndex) {
+ //See if we have in progress content
+ if (this.#currentContent) {
+ switch (this.#currentContent.type) {
+ case 'text':
+ this._emit('textDone', this.#currentContent.text, this.#messageSnapshot);
+ break;
+ case 'image_file':
+ this._emit('imageFileDone', this.#currentContent.image_file, this.#messageSnapshot);
+ break;
+ }
+ }
+
+ this.#currentContentIndex = content.index;
+ }
+
+ this.#currentContent = accumulatedMessage.content[content.index];
+ }
+ }
+
+ break;
+
+ case 'thread.message.completed':
+ case 'thread.message.incomplete':
+ //On completion (including incomplete), emit the latest content we were working on
+ if (this.#currentContentIndex !== undefined) {
+ const currentContent = event.data.content[this.#currentContentIndex];
+ if (currentContent) {
+ switch (currentContent.type) {
+ case 'image_file':
+ this._emit('imageFileDone', currentContent.image_file, this.#messageSnapshot);
+ break;
+ case 'text':
+ this._emit('textDone', currentContent.text, this.#messageSnapshot);
+ break;
+ }
+ }
+ }
+
+ if (this.#messageSnapshot) {
+ this._emit('messageDone', event.data);
+ }
+
+ this.#messageSnapshot = undefined;
+ }
+ }
+
+ #handleRunStep(event: RunStepStreamEvent) {
+ const accumulatedRunStep = this.#accumulateRunStep(event);
+ this.#currentRunStepSnapshot = accumulatedRunStep;
+
+ switch (event.event) {
+ case 'thread.run.step.created':
+ this._emit('runStepCreated', event.data);
+ break;
+ case 'thread.run.step.delta':
+ const delta = event.data.delta;
+ if (
+ delta.step_details &&
+ delta.step_details.type == 'tool_calls' &&
+ delta.step_details.tool_calls &&
+ accumulatedRunStep.step_details.type == 'tool_calls'
+ ) {
+ for (const toolCall of delta.step_details.tool_calls) {
+ if (toolCall.index == this.#currentToolCallIndex) {
+ this._emit(
+ 'toolCallDelta',
+ toolCall,
+ accumulatedRunStep.step_details.tool_calls[toolCall.index] as ToolCall,
+ );
+ } else {
+ if (this.#currentToolCall) {
+ this._emit('toolCallDone', this.#currentToolCall);
+ }
+
+ this.#currentToolCallIndex = toolCall.index;
+ this.#currentToolCall = accumulatedRunStep.step_details.tool_calls[toolCall.index];
+ if (this.#currentToolCall) this._emit('toolCallCreated', this.#currentToolCall);
+ }
+ }
+ }
+
+ this._emit('runStepDelta', event.data.delta, accumulatedRunStep);
+ break;
+ case 'thread.run.step.completed':
+ case 'thread.run.step.failed':
+ case 'thread.run.step.cancelled':
+ case 'thread.run.step.expired':
+ this.#currentRunStepSnapshot = undefined;
+ const details = event.data.step_details;
+ if (details.type == 'tool_calls') {
+ if (this.#currentToolCall) {
+ this._emit('toolCallDone', this.#currentToolCall as ToolCall);
+ this.#currentToolCall = undefined;
+ }
+ }
+ this._emit('runStepDone', event.data, accumulatedRunStep);
+ break;
+ case 'thread.run.step.in_progress':
+ break;
+ }
+ }
+
+ #handleEvent(event: AssistantStreamEvent) {
+ this.#events.push(event);
+ this._emit('event', event);
+ }
+
+ #accumulateRunStep(event: RunStepStreamEvent): Runs.RunStep {
+ switch (event.event) {
+ case 'thread.run.step.created':
+ this.#runStepSnapshots[event.data.id] = event.data;
+ return event.data;
+
+ case 'thread.run.step.delta':
+ let snapshot = this.#runStepSnapshots[event.data.id] as Runs.RunStep;
+ if (!snapshot) {
+ throw Error('Received a RunStepDelta before creation of a snapshot');
+ }
+
+ let data = event.data;
+
+ if (data.delta) {
+ const accumulated = AssistantStream.accumulateDelta(snapshot, data.delta) as Runs.RunStep;
+ this.#runStepSnapshots[event.data.id] = accumulated;
+ }
+
+ return this.#runStepSnapshots[event.data.id] as Runs.RunStep;
+
+ case 'thread.run.step.completed':
+ case 'thread.run.step.failed':
+ case 'thread.run.step.cancelled':
+ case 'thread.run.step.expired':
+ case 'thread.run.step.in_progress':
+ this.#runStepSnapshots[event.data.id] = event.data;
+ break;
+ }
+
+ if (this.#runStepSnapshots[event.data.id]) return this.#runStepSnapshots[event.data.id] as Runs.RunStep;
+ throw new Error('No snapshot available');
+ }
+
+ #accumulateMessage(
+ event: AssistantStreamEvent,
+ snapshot: Message | undefined,
+ ): [Message, MessageContentDelta[]] {
+ let newContent: MessageContentDelta[] = [];
+
+ switch (event.event) {
+ case 'thread.message.created':
+ //On creation the snapshot is just the initial message
+ return [event.data, newContent];
+
+ case 'thread.message.delta':
+ if (!snapshot) {
+ throw Error(
+ 'Received a delta with no existing snapshot (there should be one from message creation)',
+ );
+ }
+
+ let data = event.data;
+
+ //If this delta does not have content, nothing to process
+ if (data.delta.content) {
+ for (const contentElement of data.delta.content) {
+ if (contentElement.index in snapshot.content) {
+ let currentContent = snapshot.content[contentElement.index];
+ snapshot.content[contentElement.index] = this.#accumulateContent(
+ contentElement,
+ currentContent,
+ );
+ } else {
+ snapshot.content[contentElement.index] = contentElement as
+ | TextContentBlock
+ | ImageFileContentBlock;
+ //This is a new element
+ newContent.push(contentElement);
+ }
+ }
+ }
+
+ return [snapshot, newContent];
+
+ case 'thread.message.in_progress':
+ case 'thread.message.completed':
+ case 'thread.message.incomplete':
+ //No changes on other thread events
+ if (snapshot) {
+ return [snapshot, newContent];
+ } else {
+ throw Error('Received thread message event with no existing snapshot');
+ }
+ }
+ throw Error('Tried to accumulate a non-message event');
+ }
+
+ #accumulateContent(
+ contentElement: MessageContentDelta,
+ currentContent: TextContentBlock | ImageFileContentBlock | undefined,
+ ): TextContentBlock | ImageFileContentBlock {
+ return AssistantStream.accumulateDelta(currentContent as unknown as Record, contentElement) as
+ | TextContentBlock
+ | ImageFileContentBlock;
+ }
+
+ static accumulateDelta(acc: Record, delta: Record): Record {
+ for (const [key, deltaValue] of Object.entries(delta)) {
+ if (!acc.hasOwnProperty(key)) {
+ acc[key] = deltaValue;
+ continue;
+ }
+
+ let accValue = acc[key];
+ if (accValue === null || accValue === undefined) {
+ acc[key] = deltaValue;
+ continue;
+ }
+
+ // We don't accumulate these special properties
+ if (key === 'index' || key === 'type') {
+ acc[key] = deltaValue;
+ continue;
+ }
+
+ // Type-specific accumulation logic
+ if (typeof accValue === 'string' && typeof deltaValue === 'string') {
+ accValue += deltaValue;
+ } else if (typeof accValue === 'number' && typeof deltaValue === 'number') {
+ accValue += deltaValue;
+ } else if (Core.isObj(accValue) && Core.isObj(deltaValue)) {
+ accValue = this.accumulateDelta(accValue as Record, deltaValue as Record);
+ } else if (Array.isArray(accValue) && Array.isArray(deltaValue)) {
+ if (accValue.every((x) => typeof x === 'string' || typeof x === 'number')) {
+ accValue.push(...deltaValue); // Use spread syntax for efficient addition
+ continue;
+ }
+ } else {
+ throw Error(`Unhandled record type: ${key}, deltaValue: ${deltaValue}, accValue: ${accValue}`);
+ }
+ acc[key] = accValue;
+ }
+
+ return acc;
+ }
+
+ #handleRun(event: RunStreamEvent) {
+ this.#currentRunSnapshot = event.data;
+ switch (event.event) {
+ case 'thread.run.created':
+ break;
+ case 'thread.run.queued':
+ break;
+ case 'thread.run.in_progress':
+ break;
+ case 'thread.run.requires_action':
+ case 'thread.run.cancelled':
+ case 'thread.run.failed':
+ case 'thread.run.completed':
+ case 'thread.run.expired':
+ this.#finalRun = event.data;
+ if (this.#currentToolCall) {
+ this._emit('toolCallDone', this.#currentToolCall);
+ this.#currentToolCall = undefined;
+ }
+ break;
+ case 'thread.run.cancelling':
+ break;
+ }
+ }
+}
diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants/assistants.ts
index 08abb2c91..b4e92fd92 100644
--- a/src/resources/beta/assistants/assistants.ts
+++ b/src/resources/beta/assistants/assistants.ts
@@ -6,6 +6,10 @@ import { isRequestOptions } from 'openai/core';
import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
import * as Shared from 'openai/resources/shared';
import * as FilesAPI from 'openai/resources/beta/assistants/files';
+import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
+import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages';
+import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
+import * as StepsAPI from 'openai/resources/beta/threads/runs/steps';
import { CursorPage, type CursorPageParams } from 'openai/pagination';
export class Assistants extends APIResource {
@@ -145,40 +149,777 @@ export interface Assistant {
* A list of tool enabled on the assistant. There can be a maximum of 128 tools per
* assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
*/
- tools: Array;
+ tools: Array;
}
-export namespace Assistant {
- export interface CodeInterpreter {
+export interface AssistantDeleted {
+ id: string;
+
+ deleted: boolean;
+
+ object: 'assistant.deleted';
+}
+
+/**
+ * Represents an event emitted when streaming a Run.
+ *
+ * Each event in a server-sent events stream has an `event` and `data` property:
+ *
+ * ```
+ * event: thread.created
+ * data: {"id": "thread_123", "object": "thread", ...}
+ * ```
+ *
+ * We emit events whenever a new object is created, transitions to a new state, or
+ * is being streamed in parts (deltas). For example, we emit `thread.run.created`
+ * when a new run is created, `thread.run.completed` when a run completes, and so
+ * on. When an Assistant chooses to create a message during a run, we emit a
+ * `thread.message.created event`, a `thread.message.in_progress` event, many
+ * `thread.message.delta` events, and finally a `thread.message.completed` event.
+ *
+ * We may add additional events over time, so we recommend handling unknown events
+ * gracefully in your code. See the
+ * [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview)
+ * to learn how to integrate the Assistants API with streaming.
+ */
+export type AssistantStreamEvent =
+ | AssistantStreamEvent.ThreadCreated
+ | AssistantStreamEvent.ThreadRunCreated
+ | AssistantStreamEvent.ThreadRunQueued
+ | AssistantStreamEvent.ThreadRunInProgress
+ | AssistantStreamEvent.ThreadRunRequiresAction
+ | AssistantStreamEvent.ThreadRunCompleted
+ | AssistantStreamEvent.ThreadRunFailed
+ | AssistantStreamEvent.ThreadRunCancelling
+ | AssistantStreamEvent.ThreadRunCancelled
+ | AssistantStreamEvent.ThreadRunExpired
+ | AssistantStreamEvent.ThreadRunStepCreated
+ | AssistantStreamEvent.ThreadRunStepInProgress
+ | AssistantStreamEvent.ThreadRunStepDelta
+ | AssistantStreamEvent.ThreadRunStepCompleted
+ | AssistantStreamEvent.ThreadRunStepFailed
+ | AssistantStreamEvent.ThreadRunStepCancelled
+ | AssistantStreamEvent.ThreadRunStepExpired
+ | AssistantStreamEvent.ThreadMessageCreated
+ | AssistantStreamEvent.ThreadMessageInProgress
+ | AssistantStreamEvent.ThreadMessageDelta
+ | AssistantStreamEvent.ThreadMessageCompleted
+ | AssistantStreamEvent.ThreadMessageIncomplete
+ | AssistantStreamEvent.ErrorEvent;
+
+export namespace AssistantStreamEvent {
+ /**
+ * Occurs when a new
+ * [thread](https://platform.openai.com/docs/api-reference/threads/object) is
+ * created.
+ */
+ export interface ThreadCreated {
+ /**
+ * Represents a thread that contains
+ * [messages](https://platform.openai.com/docs/api-reference/messages).
+ */
+ data: ThreadsAPI.Thread;
+
+ event: 'thread.created';
+ }
+
+ /**
+ * Occurs when a new
+ * [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
+ */
+ export interface ThreadRunCreated {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.created';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * moves to a `queued` status.
+ */
+ export interface ThreadRunQueued {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.queued';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * moves to an `in_progress` status.
+ */
+ export interface ThreadRunInProgress {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.in_progress';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * moves to a `requires_action` status.
+ */
+ export interface ThreadRunRequiresAction {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.requires_action';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * is completed.
+ */
+ export interface ThreadRunCompleted {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.completed';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * fails.
+ */
+ export interface ThreadRunFailed {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.failed';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * moves to a `cancelling` status.
+ */
+ export interface ThreadRunCancelling {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.cancelling';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * is cancelled.
+ */
+ export interface ThreadRunCancelled {
/**
- * The type of tool being defined: `code_interpreter`
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
*/
- type: 'code_interpreter';
+ data: RunsAPI.Run;
+
+ event: 'thread.run.cancelled';
}
- export interface Retrieval {
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * expires.
+ */
+ export interface ThreadRunExpired {
/**
- * The type of tool being defined: `retrieval`
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
*/
- type: 'retrieval';
+ data: RunsAPI.Run;
+
+ event: 'thread.run.expired';
}
- export interface Function {
- function: Shared.FunctionDefinition;
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
+ * created.
+ */
+ export interface ThreadRunStepCreated {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.created';
+ }
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object)
+ * moves to an `in_progress` state.
+ */
+ export interface ThreadRunStepInProgress {
/**
- * The type of tool being defined: `function`
+ * Represents a step in execution of a run.
*/
- type: 'function';
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.in_progress';
+ }
+
+ /**
+ * Occurs when parts of a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) are
+ * being streamed.
+ */
+ export interface ThreadRunStepDelta {
+ /**
+ * Represents a run step delta i.e. any changed fields on a run step during
+ * streaming.
+ */
+ data: StepsAPI.RunStepDeltaEvent;
+
+ event: 'thread.run.step.delta';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
+ * completed.
+ */
+ export interface ThreadRunStepCompleted {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.completed';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object)
+ * fails.
+ */
+ export interface ThreadRunStepFailed {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.failed';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
+ * cancelled.
+ */
+ export interface ThreadRunStepCancelled {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.cancelled';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object)
+ * expires.
+ */
+ export interface ThreadRunStepExpired {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.expired';
+ }
+
+ /**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) is
+ * created.
+ */
+ export interface ThreadMessageCreated {
+ /**
+ * Represents a message within a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: MessagesAPI.Message;
+
+ event: 'thread.message.created';
+ }
+
+ /**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) moves
+ * to an `in_progress` state.
+ */
+ export interface ThreadMessageInProgress {
+ /**
+ * Represents a message within a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: MessagesAPI.Message;
+
+ event: 'thread.message.in_progress';
+ }
+
+ /**
+ * Occurs when parts of a
+ * [Message](https://platform.openai.com/docs/api-reference/messages/object) are
+ * being streamed.
+ */
+ export interface ThreadMessageDelta {
+ /**
+ * Represents a message delta i.e. any changed fields on a message during
+ * streaming.
+ */
+ data: MessagesAPI.MessageDeltaEvent;
+
+ event: 'thread.message.delta';
+ }
+
+ /**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) is
+ * completed.
+ */
+ export interface ThreadMessageCompleted {
+ /**
+ * Represents a message within a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: MessagesAPI.Message;
+
+ event: 'thread.message.completed';
+ }
+
+ /**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) ends
+ * before it is completed.
+ */
+ export interface ThreadMessageIncomplete {
+ /**
+ * Represents a message within a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: MessagesAPI.Message;
+
+ event: 'thread.message.incomplete';
+ }
+
+ /**
+ * Occurs when an
+ * [error](https://platform.openai.com/docs/guides/error-codes/api-errors) occurs.
+ * This can happen due to an internal server error or a timeout.
+ */
+ export interface ErrorEvent {
+ data: Shared.ErrorObject;
+
+ event: 'error';
}
}
-export interface AssistantDeleted {
- id: string;
+export type AssistantTool = CodeInterpreterTool | RetrievalTool | FunctionTool;
- deleted: boolean;
+export interface CodeInterpreterTool {
+ /**
+ * The type of tool being defined: `code_interpreter`
+ */
+ type: 'code_interpreter';
+}
- object: 'assistant.deleted';
+export interface FunctionTool {
+ function: Shared.FunctionDefinition;
+
+ /**
+ * The type of tool being defined: `function`
+ */
+ type: 'function';
+}
+
+/**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) is
+ * created.
+ */
+export type MessageStreamEvent =
+ | MessageStreamEvent.ThreadMessageCreated
+ | MessageStreamEvent.ThreadMessageInProgress
+ | MessageStreamEvent.ThreadMessageDelta
+ | MessageStreamEvent.ThreadMessageCompleted
+ | MessageStreamEvent.ThreadMessageIncomplete;
+
+export namespace MessageStreamEvent {
+ /**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) is
+ * created.
+ */
+ export interface ThreadMessageCreated {
+ /**
+ * Represents a message within a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: MessagesAPI.Message;
+
+ event: 'thread.message.created';
+ }
+
+ /**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) moves
+ * to an `in_progress` state.
+ */
+ export interface ThreadMessageInProgress {
+ /**
+ * Represents a message within a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: MessagesAPI.Message;
+
+ event: 'thread.message.in_progress';
+ }
+
+ /**
+ * Occurs when parts of a
+ * [Message](https://platform.openai.com/docs/api-reference/messages/object) are
+ * being streamed.
+ */
+ export interface ThreadMessageDelta {
+ /**
+ * Represents a message delta i.e. any changed fields on a message during
+ * streaming.
+ */
+ data: MessagesAPI.MessageDeltaEvent;
+
+ event: 'thread.message.delta';
+ }
+
+ /**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) is
+ * completed.
+ */
+ export interface ThreadMessageCompleted {
+ /**
+ * Represents a message within a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: MessagesAPI.Message;
+
+ event: 'thread.message.completed';
+ }
+
+ /**
+ * Occurs when a
+ * [message](https://platform.openai.com/docs/api-reference/messages/object) ends
+ * before it is completed.
+ */
+ export interface ThreadMessageIncomplete {
+ /**
+ * Represents a message within a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: MessagesAPI.Message;
+
+ event: 'thread.message.incomplete';
+ }
+}
+
+export interface RetrievalTool {
+ /**
+ * The type of tool being defined: `retrieval`
+ */
+ type: 'retrieval';
+}
+
+/**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
+ * created.
+ */
+export type RunStepStreamEvent =
+ | RunStepStreamEvent.ThreadRunStepCreated
+ | RunStepStreamEvent.ThreadRunStepInProgress
+ | RunStepStreamEvent.ThreadRunStepDelta
+ | RunStepStreamEvent.ThreadRunStepCompleted
+ | RunStepStreamEvent.ThreadRunStepFailed
+ | RunStepStreamEvent.ThreadRunStepCancelled
+ | RunStepStreamEvent.ThreadRunStepExpired;
+
+export namespace RunStepStreamEvent {
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
+ * created.
+ */
+ export interface ThreadRunStepCreated {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.created';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object)
+ * moves to an `in_progress` state.
+ */
+ export interface ThreadRunStepInProgress {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.in_progress';
+ }
+
+ /**
+ * Occurs when parts of a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) are
+ * being streamed.
+ */
+ export interface ThreadRunStepDelta {
+ /**
+ * Represents a run step delta i.e. any changed fields on a run step during
+ * streaming.
+ */
+ data: StepsAPI.RunStepDeltaEvent;
+
+ event: 'thread.run.step.delta';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
+ * completed.
+ */
+ export interface ThreadRunStepCompleted {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.completed';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object)
+ * fails.
+ */
+ export interface ThreadRunStepFailed {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.failed';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
+ * cancelled.
+ */
+ export interface ThreadRunStepCancelled {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.cancelled';
+ }
+
+ /**
+ * Occurs when a
+ * [run step](https://platform.openai.com/docs/api-reference/runs/step-object)
+ * expires.
+ */
+ export interface ThreadRunStepExpired {
+ /**
+ * Represents a step in execution of a run.
+ */
+ data: StepsAPI.RunStep;
+
+ event: 'thread.run.step.expired';
+ }
+}
+
+/**
+ * Occurs when a new
+ * [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
+ */
+export type RunStreamEvent =
+ | RunStreamEvent.ThreadRunCreated
+ | RunStreamEvent.ThreadRunQueued
+ | RunStreamEvent.ThreadRunInProgress
+ | RunStreamEvent.ThreadRunRequiresAction
+ | RunStreamEvent.ThreadRunCompleted
+ | RunStreamEvent.ThreadRunFailed
+ | RunStreamEvent.ThreadRunCancelling
+ | RunStreamEvent.ThreadRunCancelled
+ | RunStreamEvent.ThreadRunExpired;
+
+export namespace RunStreamEvent {
+ /**
+ * Occurs when a new
+ * [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
+ */
+ export interface ThreadRunCreated {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.created';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * moves to a `queued` status.
+ */
+ export interface ThreadRunQueued {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.queued';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * moves to an `in_progress` status.
+ */
+ export interface ThreadRunInProgress {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.in_progress';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * moves to a `requires_action` status.
+ */
+ export interface ThreadRunRequiresAction {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.requires_action';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * is completed.
+ */
+ export interface ThreadRunCompleted {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.completed';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * fails.
+ */
+ export interface ThreadRunFailed {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.failed';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * moves to a `cancelling` status.
+ */
+ export interface ThreadRunCancelling {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.cancelling';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * is cancelled.
+ */
+ export interface ThreadRunCancelled {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.cancelled';
+ }
+
+ /**
+ * Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
+ * expires.
+ */
+ export interface ThreadRunExpired {
+ /**
+ * Represents an execution run on a
+ * [thread](https://platform.openai.com/docs/api-reference/threads).
+ */
+ data: RunsAPI.Run;
+
+ event: 'thread.run.expired';
+ }
+}
+
+/**
+ * Occurs when a new
+ * [thread](https://platform.openai.com/docs/api-reference/threads/object) is
+ * created.
+ */
+export interface ThreadStreamEvent {
+ /**
+ * Represents a thread that contains
+ * [messages](https://platform.openai.com/docs/api-reference/messages).
+ */
+ data: ThreadsAPI.Thread;
+
+ event: 'thread.created';
}
export interface AssistantCreateParams {
@@ -226,36 +967,7 @@ export interface AssistantCreateParams {
* A list of tool enabled on the assistant. There can be a maximum of 128 tools per
* assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
*/
- tools?: Array<
- | AssistantCreateParams.AssistantToolsCode
- | AssistantCreateParams.AssistantToolsRetrieval
- | AssistantCreateParams.AssistantToolsFunction
- >;
-}
-
-export namespace AssistantCreateParams {
- export interface AssistantToolsCode {
- /**
- * The type of tool being defined: `code_interpreter`
- */
- type: 'code_interpreter';
- }
-
- export interface AssistantToolsRetrieval {
- /**
- * The type of tool being defined: `retrieval`
- */
- type: 'retrieval';
- }
-
- export interface AssistantToolsFunction {
- function: Shared.FunctionDefinition;
-
- /**
- * The type of tool being defined: `function`
- */
- type: 'function';
- }
+ tools?: Array;
}
export interface AssistantUpdateParams {
@@ -305,36 +1017,7 @@ export interface AssistantUpdateParams {
* A list of tool enabled on the assistant. There can be a maximum of 128 tools per
* assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
*/
- tools?: Array<
- | AssistantUpdateParams.AssistantToolsCode
- | AssistantUpdateParams.AssistantToolsRetrieval
- | AssistantUpdateParams.AssistantToolsFunction
- >;
-}
-
-export namespace AssistantUpdateParams {
- export interface AssistantToolsCode {
- /**
- * The type of tool being defined: `code_interpreter`
- */
- type: 'code_interpreter';
- }
-
- export interface AssistantToolsRetrieval {
- /**
- * The type of tool being defined: `retrieval`
- */
- type: 'retrieval';
- }
-
- export interface AssistantToolsFunction {
- function: Shared.FunctionDefinition;
-
- /**
- * The type of tool being defined: `function`
- */
- type: 'function';
- }
+ tools?: Array;
}
export interface AssistantListParams extends CursorPageParams {
@@ -356,6 +1039,15 @@ export interface AssistantListParams extends CursorPageParams {
export namespace Assistants {
export import Assistant = AssistantsAPI.Assistant;
export import AssistantDeleted = AssistantsAPI.AssistantDeleted;
+ export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent;
+ export import AssistantTool = AssistantsAPI.AssistantTool;
+ export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool;
+ export import FunctionTool = AssistantsAPI.FunctionTool;
+ export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent;
+ export import RetrievalTool = AssistantsAPI.RetrievalTool;
+ export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent;
+ export import RunStreamEvent = AssistantsAPI.RunStreamEvent;
+ export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent;
export import AssistantsPage = AssistantsAPI.AssistantsPage;
export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams;
export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams;
diff --git a/src/resources/beta/assistants/index.ts b/src/resources/beta/assistants/index.ts
index 5236bc8de..0ae8c9c67 100644
--- a/src/resources/beta/assistants/index.ts
+++ b/src/resources/beta/assistants/index.ts
@@ -3,6 +3,15 @@
export {
Assistant,
AssistantDeleted,
+ AssistantStreamEvent,
+ AssistantTool,
+ CodeInterpreterTool,
+ FunctionTool,
+ MessageStreamEvent,
+ RetrievalTool,
+ RunStepStreamEvent,
+ RunStreamEvent,
+ ThreadStreamEvent,
AssistantCreateParams,
AssistantUpdateParams,
AssistantListParams,
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index 5fd99990d..74056ed1d 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -16,6 +16,15 @@ export namespace Beta {
export import Assistants = AssistantsAPI.Assistants;
export import Assistant = AssistantsAPI.Assistant;
export import AssistantDeleted = AssistantsAPI.AssistantDeleted;
+ export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent;
+ export import AssistantTool = AssistantsAPI.AssistantTool;
+ export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool;
+ export import FunctionTool = AssistantsAPI.FunctionTool;
+ export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent;
+ export import RetrievalTool = AssistantsAPI.RetrievalTool;
+ export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent;
+ export import RunStreamEvent = AssistantsAPI.RunStreamEvent;
+ export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent;
export import AssistantsPage = AssistantsAPI.AssistantsPage;
export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams;
export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams;
@@ -26,4 +35,7 @@ export namespace Beta {
export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams;
export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams;
export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams;
+ export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
+ export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
+ export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams;
}
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
index 4ed7e84b1..d8770c29a 100644
--- a/src/resources/beta/index.ts
+++ b/src/resources/beta/index.ts
@@ -3,6 +3,15 @@
export {
Assistant,
AssistantDeleted,
+ AssistantStreamEvent,
+ AssistantTool,
+ CodeInterpreterTool,
+ FunctionTool,
+ MessageStreamEvent,
+ RetrievalTool,
+ RunStepStreamEvent,
+ RunStreamEvent,
+ ThreadStreamEvent,
AssistantCreateParams,
AssistantUpdateParams,
AssistantListParams,
@@ -17,5 +26,8 @@ export {
ThreadCreateParams,
ThreadUpdateParams,
ThreadCreateAndRunParams,
+ ThreadCreateAndRunParamsNonStreaming,
+ ThreadCreateAndRunParamsStreaming,
+ ThreadCreateAndRunStreamParams,
Threads,
} from './threads/index';
diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts
index 54a02dd03..3585be846 100644
--- a/src/resources/beta/threads/index.ts
+++ b/src/resources/beta/threads/index.ts
@@ -1,14 +1,30 @@
// File generated from our OpenAPI spec by Stainless.
export {
- MessageContentImageFile,
- MessageContentText,
- ThreadMessage,
- ThreadMessageDeleted,
+ Annotation,
+ AnnotationDelta,
+ FileCitationAnnotation,
+ FileCitationDeltaAnnotation,
+ FilePathAnnotation,
+ FilePathDeltaAnnotation,
+ ImageFile,
+ ImageFileContentBlock,
+ ImageFileDelta,
+ ImageFileDeltaBlock,
+ Message,
+ MessageContent,
+ MessageContentDelta,
+ MessageDeleted,
+ MessageDelta,
+ MessageDeltaEvent,
+ Text,
+ TextContentBlock,
+ TextDelta,
+ TextDeltaBlock,
MessageCreateParams,
MessageUpdateParams,
MessageListParams,
- ThreadMessagesPage,
+ MessagesPage,
Messages,
} from './messages/index';
export {
@@ -16,9 +32,15 @@ export {
Run,
RunStatus,
RunCreateParams,
+ RunCreateParamsNonStreaming,
+ RunCreateParamsStreaming,
RunUpdateParams,
RunListParams,
+ RunCreateAndStreamParams,
RunSubmitToolOutputsParams,
+ RunSubmitToolOutputsParamsNonStreaming,
+ RunSubmitToolOutputsParamsStreaming,
+ RunSubmitToolOutputsStreamParams,
RunsPage,
Runs,
} from './runs/index';
@@ -28,5 +50,8 @@ export {
ThreadCreateParams,
ThreadUpdateParams,
ThreadCreateAndRunParams,
+ ThreadCreateAndRunParamsNonStreaming,
+ ThreadCreateAndRunParamsStreaming,
+ ThreadCreateAndRunStreamParams,
Threads,
} from './threads';
diff --git a/src/resources/beta/threads/messages/index.ts b/src/resources/beta/threads/messages/index.ts
index cde22c2a9..f68edbbd4 100644
--- a/src/resources/beta/threads/messages/index.ts
+++ b/src/resources/beta/threads/messages/index.ts
@@ -1,14 +1,30 @@
// File generated from our OpenAPI spec by Stainless.
export {
- MessageContentImageFile,
- MessageContentText,
- ThreadMessage,
- ThreadMessageDeleted,
+ Annotation,
+ AnnotationDelta,
+ FileCitationAnnotation,
+ FileCitationDeltaAnnotation,
+ FilePathAnnotation,
+ FilePathDeltaAnnotation,
+ ImageFile,
+ ImageFileContentBlock,
+ ImageFileDelta,
+ ImageFileDeltaBlock,
+ Message,
+ MessageContent,
+ MessageContentDelta,
+ MessageDeleted,
+ MessageDelta,
+ MessageDeltaEvent,
+ Text,
+ TextContentBlock,
+ TextDelta,
+ TextDeltaBlock,
MessageCreateParams,
MessageUpdateParams,
MessageListParams,
- ThreadMessagesPage,
+ MessagesPage,
Messages,
} from './messages';
export { MessageFile, FileListParams, MessageFilesPage, Files } from './files';
diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts
index 40b436829..b38a4bbf0 100644
--- a/src/resources/beta/threads/messages/messages.ts
+++ b/src/resources/beta/threads/messages/messages.ts
@@ -17,7 +17,7 @@ export class Messages extends APIResource {
threadId: string,
body: MessageCreateParams,
options?: Core.RequestOptions,
- ): Core.APIPromise {
+ ): Core.APIPromise {
return this._client.post(`/threads/${threadId}/messages`, {
body,
...options,
@@ -28,11 +28,7 @@ export class Messages extends APIResource {
/**
* Retrieve a message.
*/
- retrieve(
- threadId: string,
- messageId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise {
+ retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.get(`/threads/${threadId}/messages/${messageId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
@@ -47,7 +43,7 @@ export class Messages extends APIResource {
messageId: string,
body: MessageUpdateParams,
options?: Core.RequestOptions,
- ): Core.APIPromise {
+ ): Core.APIPromise {
return this._client.post(`/threads/${threadId}/messages/${messageId}`, {
body,
...options,
@@ -62,17 +58,17 @@ export class Messages extends APIResource {
threadId: string,
query?: MessageListParams,
options?: Core.RequestOptions,
- ): Core.PagePromise;
- list(threadId: string, options?: Core.RequestOptions): Core.PagePromise;
+ ): Core.PagePromise;
+ list(threadId: string, options?: Core.RequestOptions): Core.PagePromise;
list(
threadId: string,
query: MessageListParams | Core.RequestOptions = {},
options?: Core.RequestOptions,
- ): Core.PagePromise {
+ ): Core.PagePromise {
if (isRequestOptions(query)) {
return this.list(threadId, {}, query);
}
- return this._client.getAPIList(`/threads/${threadId}/messages`, ThreadMessagesPage, {
+ return this._client.getAPIList(`/threads/${threadId}/messages`, MessagesPage, {
query,
...options,
headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
@@ -80,129 +76,220 @@ export class Messages extends APIResource {
}
}
-export class ThreadMessagesPage extends CursorPage {}
+export class MessagesPage extends CursorPage {}
/**
- * References an image [File](https://platform.openai.com/docs/api-reference/files)
- * in the content of a message.
+ * A citation within the message that points to a specific quote from a specific
+ * File associated with the assistant or the message. Generated when the assistant
+ * uses the "retrieval" tool to search files.
+ */
+export type Annotation = FileCitationAnnotation | FilePathAnnotation;
+
+/**
+ * A citation within the message that points to a specific quote from a specific
+ * File associated with the assistant or the message. Generated when the assistant
+ * uses the "retrieval" tool to search files.
+ */
+export type AnnotationDelta = FileCitationDeltaAnnotation | FilePathDeltaAnnotation;
+
+/**
+ * A citation within the message that points to a specific quote from a specific
+ * File associated with the assistant or the message. Generated when the assistant
+ * uses the "retrieval" tool to search files.
*/
-export interface MessageContentImageFile {
- image_file: MessageContentImageFile.ImageFile;
+export interface FileCitationAnnotation {
+ end_index: number;
+
+ file_citation: FileCitationAnnotation.FileCitation;
+
+ start_index: number;
/**
- * Always `image_file`.
+ * The text in the message content that needs to be replaced.
*/
- type: 'image_file';
+ text: string;
+
+ /**
+ * Always `file_citation`.
+ */
+ type: 'file_citation';
}
-export namespace MessageContentImageFile {
- export interface ImageFile {
+export namespace FileCitationAnnotation {
+ export interface FileCitation {
/**
- * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
- * in the message content.
+ * The ID of the specific File the citation is from.
*/
file_id: string;
+
+ /**
+ * The specific quote in the file.
+ */
+ quote: string;
}
}
/**
- * The text content that is part of a message.
+ * A citation within the message that points to a specific quote from a specific
+ * File associated with the assistant or the message. Generated when the assistant
+ * uses the "retrieval" tool to search files.
*/
-export interface MessageContentText {
- text: MessageContentText.Text;
+export interface FileCitationDeltaAnnotation {
+ /**
+ * The index of the annotation in the text content part.
+ */
+ index: number;
/**
- * Always `text`.
+ * Always `file_citation`.
*/
- type: 'text';
+ type: 'file_citation';
+
+ end_index?: number;
+
+ file_citation?: FileCitationDeltaAnnotation.FileCitation;
+
+ start_index?: number;
+
+ /**
+ * The text in the message content that needs to be replaced.
+ */
+ text?: string;
}
-export namespace MessageContentText {
- export interface Text {
- annotations: Array;
+export namespace FileCitationDeltaAnnotation {
+ export interface FileCitation {
+ /**
+ * The ID of the specific File the citation is from.
+ */
+ file_id?: string;
/**
- * The data that makes up the text.
+ * The specific quote in the file.
*/
- value: string;
+ quote?: string;
}
+}
+
+/**
+ * A URL for the file that's generated when the assistant used the
+ * `code_interpreter` tool to generate a file.
+ */
+export interface FilePathAnnotation {
+ end_index: number;
+
+ file_path: FilePathAnnotation.FilePath;
+
+ start_index: number;
+
+ /**
+ * The text in the message content that needs to be replaced.
+ */
+ text: string;
+
+ /**
+ * Always `file_path`.
+ */
+ type: 'file_path';
+}
- export namespace Text {
+export namespace FilePathAnnotation {
+ export interface FilePath {
/**
- * A citation within the message that points to a specific quote from a specific
- * File associated with the assistant or the message. Generated when the assistant
- * uses the "retrieval" tool to search files.
+ * The ID of the file that was generated.
*/
- export interface FileCitation {
- end_index: number;
+ file_id: string;
+ }
+}
+
+/**
+ * A URL for the file that's generated when the assistant used the
+ * `code_interpreter` tool to generate a file.
+ */
+export interface FilePathDeltaAnnotation {
+ /**
+ * The index of the annotation in the text content part.
+ */
+ index: number;
- file_citation: FileCitation.FileCitation;
+ /**
+ * Always `file_path`.
+ */
+ type: 'file_path';
- start_index: number;
+ end_index?: number;
- /**
- * The text in the message content that needs to be replaced.
- */
- text: string;
+ file_path?: FilePathDeltaAnnotation.FilePath;
- /**
- * Always `file_citation`.
- */
- type: 'file_citation';
- }
+ start_index?: number;
- export namespace FileCitation {
- export interface FileCitation {
- /**
- * The ID of the specific File the citation is from.
- */
- file_id: string;
-
- /**
- * The specific quote in the file.
- */
- quote: string;
- }
- }
+ /**
+ * The text in the message content that needs to be replaced.
+ */
+ text?: string;
+}
+export namespace FilePathDeltaAnnotation {
+ export interface FilePath {
/**
- * A URL for the file that's generated when the assistant used the
- * `code_interpreter` tool to generate a file.
+ * The ID of the file that was generated.
*/
- export interface FilePath {
- end_index: number;
+ file_id?: string;
+ }
+}
- file_path: FilePath.FilePath;
+export interface ImageFile {
+ /**
+ * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+ * in the message content.
+ */
+ file_id: string;
+}
- start_index: number;
+/**
+ * References an image [File](https://platform.openai.com/docs/api-reference/files)
+ * in the content of a message.
+ */
+export interface ImageFileContentBlock {
+ image_file: ImageFile;
- /**
- * The text in the message content that needs to be replaced.
- */
- text: string;
+ /**
+ * Always `image_file`.
+ */
+ type: 'image_file';
+}
- /**
- * Always `file_path`.
- */
- type: 'file_path';
- }
+export interface ImageFileDelta {
+ /**
+ * The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
+ * in the message content.
+ */
+ file_id?: string;
+}
- export namespace FilePath {
- export interface FilePath {
- /**
- * The ID of the file that was generated.
- */
- file_id: string;
- }
- }
- }
+/**
+ * References an image [File](https://platform.openai.com/docs/api-reference/files)
+ * in the content of a message.
+ */
+export interface ImageFileDeltaBlock {
+ /**
+ * The index of the content part in the message.
+ */
+ index: number;
+
+ /**
+ * Always `image_file`.
+ */
+ type: 'image_file';
+
+ image_file?: ImageFileDelta;
}
/**
* Represents a message within a
* [thread](https://platform.openai.com/docs/api-reference/threads).
*/
-export interface ThreadMessage {
+export interface Message {
/**
* The identifier, which can be referenced in API endpoints.
*/
@@ -215,10 +302,15 @@ export interface ThreadMessage {
*/
assistant_id: string | null;
+ /**
+ * The Unix timestamp (in seconds) for when the message was completed.
+ */
+ completed_at: number | null;
+
/**
* The content of the message in array of text and/or images.
*/
- content: Array;
+ content: Array;
/**
* The Unix timestamp (in seconds) for when the message was created.
@@ -232,6 +324,16 @@ export interface ThreadMessage {
*/
file_ids: Array;
+ /**
+ * The Unix timestamp (in seconds) for when the message was marked as incomplete.
+ */
+ incomplete_at: number | null;
+
+ /**
+ * On an incomplete message, details about why the message is incomplete.
+ */
+ incomplete_details: Message.IncompleteDetails | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -257,6 +359,12 @@ export interface ThreadMessage {
*/
run_id: string | null;
+ /**
+ * The status of the message, which can be either `in_progress`, `incomplete`, or
+ * `completed`.
+ */
+ status: 'in_progress' | 'incomplete' | 'completed';
+
/**
* The [thread](https://platform.openai.com/docs/api-reference/threads) ID that
* this message belongs to.
@@ -264,7 +372,31 @@ export interface ThreadMessage {
thread_id: string;
}
-export interface ThreadMessageDeleted {
+export namespace Message {
+ /**
+ * On an incomplete message, details about why the message is incomplete.
+ */
+ export interface IncompleteDetails {
+ /**
+ * The reason the message is incomplete.
+ */
+ reason: 'content_filter' | 'max_tokens' | 'run_cancelled' | 'run_expired' | 'run_failed';
+ }
+}
+
+/**
+ * References an image [File](https://platform.openai.com/docs/api-reference/files)
+ * in the content of a message.
+ */
+export type MessageContent = ImageFileContentBlock | TextContentBlock;
+
+/**
+ * References an image [File](https://platform.openai.com/docs/api-reference/files)
+ * in the content of a message.
+ */
+export type MessageContentDelta = ImageFileDeltaBlock | TextDeltaBlock;
+
+export interface MessageDeleted {
id: string;
deleted: boolean;
@@ -272,6 +404,96 @@ export interface ThreadMessageDeleted {
object: 'thread.message.deleted';
}
+/**
+ * The delta containing the fields that have changed on the Message.
+ */
+export interface MessageDelta {
+ /**
+ * The content of the message in array of text and/or images.
+ */
+ content?: Array;
+
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the assistant should use. Useful for tools like retrieval and code_interpreter
+ * that can access files. A maximum of 10 files can be attached to a message.
+ */
+ file_ids?: Array;
+
+ /**
+ * The entity that produced the message. One of `user` or `assistant`.
+ */
+ role?: 'user' | 'assistant';
+}
+
+/**
+ * Represents a message delta i.e. any changed fields on a message during
+ * streaming.
+ */
+export interface MessageDeltaEvent {
+ /**
+ * The identifier of the message, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The delta containing the fields that have changed on the Message.
+ */
+ delta: MessageDelta;
+
+ /**
+ * The object type, which is always `thread.message.delta`.
+ */
+ object: 'thread.message.delta';
+}
+
+export interface Text {
+ annotations: Array;
+
+ /**
+ * The data that makes up the text.
+ */
+ value: string;
+}
+
+/**
+ * The text content that is part of a message.
+ */
+export interface TextContentBlock {
+ text: Text;
+
+ /**
+ * Always `text`.
+ */
+ type: 'text';
+}
+
+export interface TextDelta {
+ annotations?: Array;
+
+ /**
+ * The data that makes up the text.
+ */
+ value?: string;
+}
+
+/**
+ * The text content that is part of a message.
+ */
+export interface TextDeltaBlock {
+ /**
+ * The index of the content part in the message.
+ */
+ index: number;
+
+ /**
+ * Always `text`.
+ */
+ type: 'text';
+
+ text?: TextDelta;
+}
+
export interface MessageCreateParams {
/**
* The content of the message.
@@ -328,11 +550,27 @@ export interface MessageListParams extends CursorPageParams {
}
export namespace Messages {
- export import MessageContentImageFile = MessagesAPI.MessageContentImageFile;
- export import MessageContentText = MessagesAPI.MessageContentText;
- export import ThreadMessage = MessagesAPI.ThreadMessage;
- export import ThreadMessageDeleted = MessagesAPI.ThreadMessageDeleted;
- export import ThreadMessagesPage = MessagesAPI.ThreadMessagesPage;
+ export import Annotation = MessagesAPI.Annotation;
+ export import AnnotationDelta = MessagesAPI.AnnotationDelta;
+ export import FileCitationAnnotation = MessagesAPI.FileCitationAnnotation;
+ export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation;
+ export import FilePathAnnotation = MessagesAPI.FilePathAnnotation;
+ export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation;
+ export import ImageFile = MessagesAPI.ImageFile;
+ export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock;
+ export import ImageFileDelta = MessagesAPI.ImageFileDelta;
+ export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock;
+ export import Message = MessagesAPI.Message;
+ export import MessageContent = MessagesAPI.MessageContent;
+ export import MessageContentDelta = MessagesAPI.MessageContentDelta;
+ export import MessageDeleted = MessagesAPI.MessageDeleted;
+ export import MessageDelta = MessagesAPI.MessageDelta;
+ export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent;
+ export import Text = MessagesAPI.Text;
+ export import TextContentBlock = MessagesAPI.TextContentBlock;
+ export import TextDelta = MessagesAPI.TextDelta;
+ export import TextDeltaBlock = MessagesAPI.TextDeltaBlock;
+ export import MessagesPage = MessagesAPI.MessagesPage;
export import MessageCreateParams = MessagesAPI.MessageCreateParams;
export import MessageUpdateParams = MessagesAPI.MessageUpdateParams;
export import MessageListParams = MessagesAPI.MessageListParams;
diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts
index b11736c5c..7fa34637a 100644
--- a/src/resources/beta/threads/runs/index.ts
+++ b/src/resources/beta/threads/runs/index.ts
@@ -1,11 +1,22 @@
// File generated from our OpenAPI spec by Stainless.
export {
- CodeToolCall,
+ CodeInterpreterLogs,
+ CodeInterpreterOutputImage,
+ CodeInterpreterToolCall,
+ CodeInterpreterToolCallDelta,
FunctionToolCall,
+ FunctionToolCallDelta,
MessageCreationStepDetails,
RetrievalToolCall,
+ RetrievalToolCallDelta,
RunStep,
+ RunStepDelta,
+ RunStepDeltaEvent,
+ RunStepDeltaMessageDelta,
+ ToolCall,
+ ToolCallDelta,
+ ToolCallDeltaObject,
ToolCallsStepDetails,
StepListParams,
RunStepsPage,
@@ -16,9 +27,15 @@ export {
Run,
RunStatus,
RunCreateParams,
+ RunCreateParamsNonStreaming,
+ RunCreateParamsStreaming,
RunUpdateParams,
RunListParams,
+ RunCreateAndStreamParams,
RunSubmitToolOutputsParams,
+ RunSubmitToolOutputsParamsNonStreaming,
+ RunSubmitToolOutputsParamsStreaming,
+ RunSubmitToolOutputsStreamParams,
RunsPage,
Runs,
} from './runs';
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 9a0bc00dd..8fe09ecc6 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -1,12 +1,16 @@
// File generated from our OpenAPI spec by Stainless.
import * as Core from 'openai/core';
+import { APIPromise } from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
+import { AssistantStream, RunCreateParamsBaseStream } from 'openai/lib/AssistantStream';
+import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
-import * as Shared from 'openai/resources/shared';
+import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
import * as StepsAPI from 'openai/resources/beta/threads/runs/steps';
import { CursorPage, type CursorPageParams } from 'openai/pagination';
+import { Stream } from 'openai/streaming';
export class Runs extends APIResource {
steps: StepsAPI.Steps = new StepsAPI.Steps(this._client);
@@ -14,12 +18,28 @@ export class Runs extends APIResource {
/**
* Create a run.
*/
- create(threadId: string, body: RunCreateParams, options?: Core.RequestOptions): Core.APIPromise {
+ create(threadId: string, body: RunCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise;
+ create(
+ threadId: string,
+ body: RunCreateParamsStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise>;
+ create(
+ threadId: string,
+ body: RunCreateParamsBase,
+ options?: Core.RequestOptions,
+ ): APIPromise | Run>;
+ create(
+ threadId: string,
+ body: RunCreateParams,
+ options?: Core.RequestOptions,
+ ): APIPromise | APIPromise> {
return this._client.post(`/threads/${threadId}/runs`, {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
+ stream: body.stream ?? false,
+ }) as APIPromise | APIPromise>;
}
/**
@@ -82,23 +102,72 @@ export class Runs extends APIResource {
});
}
+ /**
+ * Create a Run stream
+ */
+ createAndStream(
+ threadId: string,
+ body: RunCreateParamsBaseStream,
+ options?: Core.RequestOptions,
+ ): AssistantStream {
+ return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
+ }
+
/**
* When a run has the `status: "requires_action"` and `required_action.type` is
* `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
* tool calls once they're all completed. All outputs must be submitted in a single
* request.
*/
+ submitToolOutputs(
+ threadId: string,
+ runId: string,
+ body: RunSubmitToolOutputsParamsNonStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise;
+ submitToolOutputs(
+ threadId: string,
+ runId: string,
+ body: RunSubmitToolOutputsParamsStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise>;
+ submitToolOutputs(
+ threadId: string,
+ runId: string,
+ body: RunSubmitToolOutputsParamsBase,
+ options?: Core.RequestOptions,
+ ): APIPromise | Run>;
submitToolOutputs(
threadId: string,
runId: string,
body: RunSubmitToolOutputsParams,
options?: Core.RequestOptions,
- ): Core.APIPromise {
+ ): APIPromise | APIPromise> {
return this._client.post(`/threads/${threadId}/runs/${runId}/submit_tool_outputs`, {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
+ stream: body.stream ?? false,
+ }) as APIPromise | APIPromise>;
+ }
+
+ /**
+ * Submit the tool outputs from a previous run and stream the run to a terminal
+ * state.
+ */
+ submitToolOutputsStream(
+ threadId: string,
+ runId: string,
+ body: RunSubmitToolOutputsParamsStream,
+ options?: Core.RequestOptions,
+ ): AssistantStream {
+ return AssistantStream.createToolAssistantStream(
+ threadId,
+ runId,
+ this._client.beta.threads.runs,
+ body,
+ options,
+ );
}
}
@@ -180,7 +249,7 @@ export interface Run {
/**
* The Unix timestamp (in seconds) for when the run will expire.
*/
- expires_at: number;
+ expires_at: number | null;
/**
* The Unix timestamp (in seconds) for when the run failed.
@@ -255,7 +324,7 @@ export interface Run {
* [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
* this run.
*/
- tools: Array;
+ tools: Array;
/**
* Usage statistics related to the run. This value will be `null` if the run is not
@@ -308,29 +377,6 @@ export namespace Run {
}
}
- export interface AssistantToolsCode {
- /**
- * The type of tool being defined: `code_interpreter`
- */
- type: 'code_interpreter';
- }
-
- export interface AssistantToolsRetrieval {
- /**
- * The type of tool being defined: `retrieval`
- */
- type: 'retrieval';
- }
-
- export interface AssistantToolsFunction {
- function: Shared.FunctionDefinition;
-
- /**
- * The type of tool being defined: `function`
- */
- type: 'function';
- }
-
/**
* Usage statistics related to the run. This value will be `null` if the run is not
* in a terminal state (i.e. `in_progress`, `queued`, etc.).
@@ -368,7 +414,9 @@ export type RunStatus =
| 'completed'
| 'expired';
-export interface RunCreateParams {
+export type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStreaming;
+
+export interface RunCreateParamsBase {
/**
* The ID of the
* [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
@@ -406,40 +454,41 @@ export interface RunCreateParams {
*/
model?: string | null;
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream?: boolean | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
*/
- tools?: Array<
- | RunCreateParams.AssistantToolsCode
- | RunCreateParams.AssistantToolsRetrieval
- | RunCreateParams.AssistantToolsFunction
- > | null;
+ tools?: Array | null;
}
export namespace RunCreateParams {
- export interface AssistantToolsCode {
- /**
- * The type of tool being defined: `code_interpreter`
- */
- type: 'code_interpreter';
- }
-
- export interface AssistantToolsRetrieval {
- /**
- * The type of tool being defined: `retrieval`
- */
- type: 'retrieval';
- }
+ export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
+ export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
+}
- export interface AssistantToolsFunction {
- function: Shared.FunctionDefinition;
+export interface RunCreateParamsNonStreaming extends RunCreateParamsBase {
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream?: false | null;
+}
- /**
- * The type of tool being defined: `function`
- */
- type: 'function';
- }
+export interface RunCreateParamsStreaming extends RunCreateParamsBase {
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream: true;
}
export interface RunUpdateParams {
@@ -468,11 +517,67 @@ export interface RunListParams extends CursorPageParams {
order?: 'asc' | 'desc';
}
-export interface RunSubmitToolOutputsParams {
+export interface RunCreateAndStreamParams {
+ /**
+ * The ID of the
+ * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this run.
+ */
+ assistant_id: string;
+
+ /**
+ * Appends additional instructions at the end of the instructions for the run. This
+ * is useful for modifying the behavior on a per-run basis without overriding other
+ * instructions.
+ */
+ additional_instructions?: string | null;
+
+ /**
+ * Overrides the
+ * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+ * of the assistant. This is useful for modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this run. If a value is provided here, it will override the
+ * model associated with the assistant. If not, the model associated with the
+ * assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array | null;
+}
+
+export type RunSubmitToolOutputsParams =
+ | RunSubmitToolOutputsParamsNonStreaming
+ | RunSubmitToolOutputsParamsStreaming;
+
+export interface RunSubmitToolOutputsParamsBase {
/**
* A list of tools for which the outputs are being submitted.
*/
tool_outputs: Array;
+
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream?: boolean | null;
}
export namespace RunSubmitToolOutputsParams {
@@ -488,6 +593,49 @@ export namespace RunSubmitToolOutputsParams {
*/
tool_call_id?: string;
}
+
+ export type RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
+ export type RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
+}
+
+export interface RunSubmitToolOutputsParamsNonStreaming extends RunSubmitToolOutputsParamsBase {
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream?: false | null;
+}
+
+export interface RunSubmitToolOutputsParamsStreaming extends RunSubmitToolOutputsParamsBase {
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream: true;
+}
+
+export interface RunSubmitToolOutputsStreamParams {
+ /**
+ * A list of tools for which the outputs are being submitted.
+ */
+ tool_outputs: Array;
+}
+
+export namespace RunSubmitToolOutputsStreamParams {
+ export interface ToolOutput {
+ /**
+ * The output of the tool call to be submitted to continue the run.
+ */
+ output?: string;
+
+ /**
+ * The ID of the tool call in the `required_action` object within the run object
+ * the output is being submitted for.
+ */
+ tool_call_id?: string;
+ }
}
export namespace Runs {
@@ -496,15 +644,32 @@ export namespace Runs {
export import RunStatus = RunsAPI.RunStatus;
export import RunsPage = RunsAPI.RunsPage;
export import RunCreateParams = RunsAPI.RunCreateParams;
+ export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
+ export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
export import RunUpdateParams = RunsAPI.RunUpdateParams;
export import RunListParams = RunsAPI.RunListParams;
+ export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams;
export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
+ export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
+ export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
+ export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams;
export import Steps = StepsAPI.Steps;
- export import CodeToolCall = StepsAPI.CodeToolCall;
+ export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs;
+ export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage;
+ export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall;
+ export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta;
export import FunctionToolCall = StepsAPI.FunctionToolCall;
+ export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta;
export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails;
export import RetrievalToolCall = StepsAPI.RetrievalToolCall;
+ export import RetrievalToolCallDelta = StepsAPI.RetrievalToolCallDelta;
export import RunStep = StepsAPI.RunStep;
+ export import RunStepDelta = StepsAPI.RunStepDelta;
+ export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent;
+ export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta;
+ export import ToolCall = StepsAPI.ToolCall;
+ export import ToolCallDelta = StepsAPI.ToolCallDelta;
+ export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject;
export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails;
export import RunStepsPage = StepsAPI.RunStepsPage;
export import StepListParams = StepsAPI.StepListParams;
diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts
index c574c94d1..4218e9769 100644
--- a/src/resources/beta/threads/runs/steps.ts
+++ b/src/resources/beta/threads/runs/steps.ts
@@ -55,10 +55,54 @@ export class Steps extends APIResource {
export class RunStepsPage extends CursorPage {}
+/**
+ * Text output from the Code Interpreter tool call as part of a run step.
+ */
+export interface CodeInterpreterLogs {
+ /**
+ * The index of the output in the outputs array.
+ */
+ index: number;
+
+ /**
+ * Always `logs`.
+ */
+ type: 'logs';
+
+ /**
+ * The text output from the Code Interpreter tool call.
+ */
+ logs?: string;
+}
+
+export interface CodeInterpreterOutputImage {
+ /**
+ * The index of the output in the outputs array.
+ */
+ index: number;
+
+ /**
+ * Always `image`.
+ */
+ type: 'image';
+
+ image?: CodeInterpreterOutputImage.Image;
+}
+
+export namespace CodeInterpreterOutputImage {
+ export interface Image {
+ /**
+ * The [file](https://platform.openai.com/docs/api-reference/files) ID of the
+ * image.
+ */
+ file_id?: string;
+ }
+}
+
/**
* Details of the Code Interpreter tool call the run step was involved in.
*/
-export interface CodeToolCall {
+export interface CodeInterpreterToolCall {
/**
* The ID of the tool call.
*/
@@ -67,7 +111,7 @@ export interface CodeToolCall {
/**
* The Code Interpreter tool call definition.
*/
- code_interpreter: CodeToolCall.CodeInterpreter;
+ code_interpreter: CodeInterpreterToolCall.CodeInterpreter;
/**
* The type of tool call. This is always going to be `code_interpreter` for this
@@ -76,7 +120,7 @@ export interface CodeToolCall {
type: 'code_interpreter';
}
-export namespace CodeToolCall {
+export namespace CodeInterpreterToolCall {
/**
* The Code Interpreter tool call definition.
*/
@@ -131,6 +175,51 @@ export namespace CodeToolCall {
}
}
+/**
+ * Details of the Code Interpreter tool call the run step was involved in.
+ */
+export interface CodeInterpreterToolCallDelta {
+ /**
+ * The index of the tool call in the tool calls array.
+ */
+ index: number;
+
+ /**
+ * The type of tool call. This is always going to be `code_interpreter` for this
+ * type of tool call.
+ */
+ type: 'code_interpreter';
+
+ /**
+ * The ID of the tool call.
+ */
+ id?: string;
+
+ /**
+ * The Code Interpreter tool call definition.
+ */
+ code_interpreter?: CodeInterpreterToolCallDelta.CodeInterpreter;
+}
+
+export namespace CodeInterpreterToolCallDelta {
+ /**
+ * The Code Interpreter tool call definition.
+ */
+ export interface CodeInterpreter {
+ /**
+ * The input to the Code Interpreter tool call.
+ */
+ input?: string;
+
+ /**
+ * The outputs from the Code Interpreter tool call. Code Interpreter can output one
+ * or more items, including text (`logs`) or images (`image`). Each of these are
+ * represented by a different object type.
+ */
+ outputs?: Array;
+ }
+}
+
export interface FunctionToolCall {
/**
* The ID of the tool call object.
@@ -173,6 +262,53 @@ export namespace FunctionToolCall {
}
}
+export interface FunctionToolCallDelta {
+ /**
+ * The index of the tool call in the tool calls array.
+ */
+ index: number;
+
+ /**
+ * The type of tool call. This is always going to be `function` for this type of
+ * tool call.
+ */
+ type: 'function';
+
+ /**
+ * The ID of the tool call object.
+ */
+ id?: string;
+
+ /**
+ * The definition of the function that was called.
+ */
+ function?: FunctionToolCallDelta.Function;
+}
+
+export namespace FunctionToolCallDelta {
+ /**
+ * The definition of the function that was called.
+ */
+ export interface Function {
+ /**
+ * The arguments passed to the function.
+ */
+ arguments?: string;
+
+ /**
+ * The name of the function.
+ */
+ name?: string;
+
+ /**
+ * The output of the function. This will be `null` if the outputs have not been
+ * [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
+ * yet.
+ */
+ output?: string | null;
+ }
+}
+
/**
* Details of the message creation by the run step.
*/
@@ -212,6 +348,29 @@ export interface RetrievalToolCall {
type: 'retrieval';
}
+export interface RetrievalToolCallDelta {
+ /**
+ * The index of the tool call in the tool calls array.
+ */
+ index: number;
+
+ /**
+ * The type of tool call. This is always going to be `retrieval` for this type of
+ * tool call.
+ */
+ type: 'retrieval';
+
+ /**
+ * The ID of the tool call object.
+ */
+ id?: string;
+
+ /**
+ * For now, this is always going to be an empty object.
+ */
+ retrieval?: unknown;
+}
+
/**
* Represents a step in execution of a run.
*/
@@ -347,6 +506,85 @@ export namespace RunStep {
}
}
+/**
+ * The delta containing the fields that have changed on the run step.
+ */
+export interface RunStepDelta {
+ /**
+ * The details of the run step.
+ */
+ step_details?: RunStepDeltaMessageDelta | ToolCallDeltaObject;
+}
+
+/**
+ * Represents a run step delta i.e. any changed fields on a run step during
+ * streaming.
+ */
+export interface RunStepDeltaEvent {
+ /**
+ * The identifier of the run step, which can be referenced in API endpoints.
+ */
+ id: string;
+
+ /**
+ * The delta containing the fields that have changed on the run step.
+ */
+ delta: RunStepDelta;
+
+ /**
+ * The object type, which is always `thread.run.step.delta`.
+ */
+ object: 'thread.run.step.delta';
+}
+
+/**
+ * Details of the message creation by the run step.
+ */
+export interface RunStepDeltaMessageDelta {
+ /**
+ * Always `message_creation`.
+ */
+ type: 'message_creation';
+
+ message_creation?: RunStepDeltaMessageDelta.MessageCreation;
+}
+
+export namespace RunStepDeltaMessageDelta {
+ export interface MessageCreation {
+ /**
+ * The ID of the message that was created by this run step.
+ */
+ message_id?: string;
+ }
+}
+
+/**
+ * Details of the Code Interpreter tool call the run step was involved in.
+ */
+export type ToolCall = CodeInterpreterToolCall | RetrievalToolCall | FunctionToolCall;
+
+/**
+ * Details of the Code Interpreter tool call the run step was involved in.
+ */
+export type ToolCallDelta = CodeInterpreterToolCallDelta | RetrievalToolCallDelta | FunctionToolCallDelta;
+
+/**
+ * Details of the tool call.
+ */
+export interface ToolCallDeltaObject {
+ /**
+ * Always `tool_calls`.
+ */
+ type: 'tool_calls';
+
+ /**
+ * An array of tool calls the run step was involved in. These can be associated
+ * with one of three types of tools: `code_interpreter`, `retrieval`, or
+ * `function`.
+ */
+ tool_calls?: Array;
+}
+
/**
* Details of the tool call.
*/
@@ -356,7 +594,7 @@ export interface ToolCallsStepDetails {
* with one of three types of tools: `code_interpreter`, `retrieval`, or
* `function`.
*/
- tool_calls: Array;
+ tool_calls: Array;
/**
* Always `tool_calls`.
@@ -381,11 +619,22 @@ export interface StepListParams extends CursorPageParams {
}
export namespace Steps {
- export import CodeToolCall = StepsAPI.CodeToolCall;
+ export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs;
+ export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage;
+ export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall;
+ export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta;
export import FunctionToolCall = StepsAPI.FunctionToolCall;
+ export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta;
export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails;
export import RetrievalToolCall = StepsAPI.RetrievalToolCall;
+ export import RetrievalToolCallDelta = StepsAPI.RetrievalToolCallDelta;
export import RunStep = StepsAPI.RunStep;
+ export import RunStepDelta = StepsAPI.RunStepDelta;
+ export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent;
+ export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta;
+ export import ToolCall = StepsAPI.ToolCall;
+ export import ToolCallDelta = StepsAPI.ToolCallDelta;
+ export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject;
export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails;
export import RunStepsPage = StepsAPI.RunStepsPage;
export import StepListParams = StepsAPI.StepListParams;
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index 5aa1f8c25..cbde41f89 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -1,12 +1,15 @@
// File generated from our OpenAPI spec by Stainless.
import * as Core from 'openai/core';
+import { APIPromise } from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
+import { AssistantStream, ThreadCreateAndRunParamsBaseStream } from 'openai/lib/AssistantStream';
import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
-import * as Shared from 'openai/resources/shared';
+import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
+import { Stream } from 'openai/streaming';
export class Threads extends APIResource {
runs: RunsAPI.Runs = new RunsAPI.Runs(this._client);
@@ -65,12 +68,38 @@ export class Threads extends APIResource {
/**
* Create a thread and run it in one request.
*/
- createAndRun(body: ThreadCreateAndRunParams, options?: Core.RequestOptions): Core.APIPromise {
+ createAndRun(
+ body: ThreadCreateAndRunParamsNonStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise;
+ createAndRun(
+ body: ThreadCreateAndRunParamsStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise>;
+ createAndRun(
+ body: ThreadCreateAndRunParamsBase,
+ options?: Core.RequestOptions,
+ ): APIPromise | RunsAPI.Run>;
+ createAndRun(
+ body: ThreadCreateAndRunParams,
+ options?: Core.RequestOptions,
+ ): APIPromise | APIPromise> {
return this._client.post('/threads/runs', {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
+ stream: body.stream ?? false,
+ }) as APIPromise | APIPromise>;
+ }
+
+ /**
+ * Create a thread and stream the run back
+ */
+ createAndRunStream(
+ body: ThreadCreateAndRunParamsBaseStream,
+ options?: Core.RequestOptions,
+ ): AssistantStream {
+ return AssistantStream.createThreadAssistantStream(body, this._client.beta.threads, options);
}
}
@@ -168,7 +197,11 @@ export interface ThreadUpdateParams {
metadata?: unknown | null;
}
-export interface ThreadCreateAndRunParams {
+export type ThreadCreateAndRunParams =
+ | ThreadCreateAndRunParamsNonStreaming
+ | ThreadCreateAndRunParamsStreaming;
+
+export interface ThreadCreateAndRunParamsBase {
/**
* The ID of the
* [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
@@ -198,6 +231,13 @@ export interface ThreadCreateAndRunParams {
*/
model?: string | null;
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream?: boolean | null;
+
/**
* If no thread is provided, an empty thread will be created.
*/
@@ -208,9 +248,7 @@ export interface ThreadCreateAndRunParams {
* modifying the behavior on a per-run basis.
*/
tools?: Array<
- | ThreadCreateAndRunParams.AssistantToolsCode
- | ThreadCreateAndRunParams.AssistantToolsRetrieval
- | ThreadCreateAndRunParams.AssistantToolsFunction
+ AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
> | null;
}
@@ -265,27 +303,121 @@ export namespace ThreadCreateAndRunParams {
}
}
- export interface AssistantToolsCode {
+ export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
+ export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
+}
+
+export interface ThreadCreateAndRunParamsNonStreaming extends ThreadCreateAndRunParamsBase {
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream?: false | null;
+}
+
+export interface ThreadCreateAndRunParamsStreaming extends ThreadCreateAndRunParamsBase {
+ /**
+ * If `true`, returns a stream of events that happen during the Run as server-sent
+ * events, terminating when the Run enters a terminal state with a `data: [DONE]`
+ * message.
+ */
+ stream: true;
+}
+
+export interface ThreadCreateAndRunStreamParams {
+ /**
+ * The ID of the
+ * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this run.
+ */
+ assistant_id: string;
+
+ /**
+ * Override the default system message of the assistant. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this run. If a value is provided here, it will override the
+ * model associated with the assistant. If not, the model associated with the
+ * assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * If no thread is provided, an empty thread will be created.
+ */
+ thread?: ThreadCreateAndRunStreamParams.Thread;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array<
+ AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
+ > | null;
+}
+
+export namespace ThreadCreateAndRunStreamParams {
+ /**
+ * If no thread is provided, an empty thread will be created.
+ */
+ export interface Thread {
/**
- * The type of tool being defined: `code_interpreter`
+ * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+ * start the thread with.
*/
- type: 'code_interpreter';
- }
+ messages?: Array;
- export interface AssistantToolsRetrieval {
/**
- * The type of tool being defined: `retrieval`
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
*/
- type: 'retrieval';
+ metadata?: unknown | null;
}
- export interface AssistantToolsFunction {
- function: Shared.FunctionDefinition;
+ export namespace Thread {
+ export interface Message {
+ /**
+ * The content of the message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the message. Currently only `user` is
+ * supported.
+ */
+ role: 'user';
- /**
- * The type of tool being defined: `function`
- */
- type: 'function';
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the message should use. There can be a maximum of 10 files attached to a
+ * message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
}
}
@@ -295,21 +427,46 @@ export namespace Threads {
export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams;
export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams;
export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams;
+ export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
+ export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
+ export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams;
export import Runs = RunsAPI.Runs;
export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
export import Run = RunsAPI.Run;
export import RunStatus = RunsAPI.RunStatus;
export import RunsPage = RunsAPI.RunsPage;
export import RunCreateParams = RunsAPI.RunCreateParams;
+ export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
+ export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
export import RunUpdateParams = RunsAPI.RunUpdateParams;
export import RunListParams = RunsAPI.RunListParams;
+ export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams;
export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
+ export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
+ export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
+ export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams;
export import Messages = MessagesAPI.Messages;
- export import MessageContentImageFile = MessagesAPI.MessageContentImageFile;
- export import MessageContentText = MessagesAPI.MessageContentText;
- export import ThreadMessage = MessagesAPI.ThreadMessage;
- export import ThreadMessageDeleted = MessagesAPI.ThreadMessageDeleted;
- export import ThreadMessagesPage = MessagesAPI.ThreadMessagesPage;
+ export import Annotation = MessagesAPI.Annotation;
+ export import AnnotationDelta = MessagesAPI.AnnotationDelta;
+ export import FileCitationAnnotation = MessagesAPI.FileCitationAnnotation;
+ export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation;
+ export import FilePathAnnotation = MessagesAPI.FilePathAnnotation;
+ export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation;
+ export import ImageFile = MessagesAPI.ImageFile;
+ export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock;
+ export import ImageFileDelta = MessagesAPI.ImageFileDelta;
+ export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock;
+ export import Message = MessagesAPI.Message;
+ export import MessageContent = MessagesAPI.MessageContent;
+ export import MessageContentDelta = MessagesAPI.MessageContentDelta;
+ export import MessageDeleted = MessagesAPI.MessageDeleted;
+ export import MessageDelta = MessagesAPI.MessageDelta;
+ export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent;
+ export import Text = MessagesAPI.Text;
+ export import TextContentBlock = MessagesAPI.TextContentBlock;
+ export import TextDelta = MessagesAPI.TextDelta;
+ export import TextDeltaBlock = MessagesAPI.TextDeltaBlock;
+ export import MessagesPage = MessagesAPI.MessagesPage;
export import MessageCreateParams = MessagesAPI.MessageCreateParams;
export import MessageUpdateParams = MessagesAPI.MessageUpdateParams;
export import MessageListParams = MessagesAPI.MessageListParams;
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index c2d6da0be..41216a8e3 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -829,7 +829,7 @@ export interface ChatCompletionCreateParamsBase {
/**
* A list of tools the model may call. Currently, only functions are supported as a
* tool. Use this to provide a list of functions the model may generate JSON inputs
- * for.
+ * for. A max of 128 functions are supported.
*/
tools?: Array;
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index f3e262f5f..83ecb3e99 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -253,6 +253,8 @@ export interface CompletionCreateParamsBase {
/**
* The suffix that comes after a completion of inserted text.
+ *
+ * This parameter is only supported for `gpt-3.5-turbo-instruct`.
*/
suffix?: string | null;
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 05ab66383..a6b2c11bd 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -1,5 +1,15 @@
// File generated from our OpenAPI spec by Stainless.
+export interface ErrorObject {
+ code: string | null;
+
+ message: string;
+
+ param: string | null;
+
+ type: string;
+}
+
export interface FunctionDefinition {
/**
* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
diff --git a/src/streaming.ts b/src/streaming.ts
index f90c5d89a..c452737aa 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -78,6 +78,20 @@ export class Stream- implements AsyncIterable
- {
}
yield data;
+ } else {
+ let data;
+ try {
+ data = JSON.parse(sse.data);
+ } catch (e) {
+ console.error(`Could not parse message into JSON:`, sse.data);
+ console.error(`From chunk:`, sse.raw);
+ throw e;
+ }
+ // TODO: Is this where the error should be thrown?
+ if (sse.event == 'error') {
+ throw new APIError(undefined, data.error, data.message, undefined);
+ }
+ yield { event: sse.event, data: data } as any;
}
}
done = true;
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
index 5a720afce..45f17040a 100644
--- a/tests/api-resources/beta/threads/runs/runs.test.ts
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -27,6 +27,7 @@ describe('resource runs', () => {
instructions: 'string',
metadata: {},
model: 'string',
+ stream: false,
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
});
});
@@ -127,6 +128,7 @@ describe('resource runs', () => {
{ tool_call_id: 'string', output: 'string' },
{ tool_call_id: 'string', output: 'string' },
],
+ stream: false,
});
});
});
diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts
index fc9fef723..9243dc11c 100644
--- a/tests/api-resources/beta/threads/threads.test.ts
+++ b/tests/api-resources/beta/threads/threads.test.ts
@@ -108,6 +108,7 @@ describe('resource threads', () => {
instructions: 'string',
metadata: {},
model: 'string',
+ stream: false,
thread: {
messages: [
{ role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
diff --git a/tests/streaming/assistants/assistant.test.ts b/tests/streaming/assistants/assistant.test.ts
new file mode 100644
index 000000000..e8db3d585
--- /dev/null
+++ b/tests/streaming/assistants/assistant.test.ts
@@ -0,0 +1,32 @@
+import OpenAI from 'openai';
+import { AssistantStream } from 'openai/lib/AssistantStream';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/',
+});
+
+describe('assistant tests', () => {
+ test('delta accumulation', () => {
+ expect(AssistantStream.accumulateDelta({}, {})).toEqual({});
+ expect(AssistantStream.accumulateDelta({}, { a: 'apple' })).toEqual({ a: 'apple' });
+
+ // strings
+ expect(AssistantStream.accumulateDelta({ a: 'foo' }, { a: ' bar' })).toEqual({ a: 'foo bar' });
+
+ // dictionaries
+ expect(AssistantStream.accumulateDelta({ a: { foo: '1' } }, { a: { bar: '2' } })).toEqual({
+ a: {
+ foo: '1',
+ bar: '2',
+ },
+ });
+ expect(AssistantStream.accumulateDelta({ a: { foo: 'hello,' } }, { a: { foo: ' world' } })).toEqual({
+ a: { foo: 'hello, world' },
+ });
+
+ expect(AssistantStream.accumulateDelta({}, { a: null })).toEqual({ a: null });
+ expect(AssistantStream.accumulateDelta({ a: null }, { a: 'apple' })).toEqual({ a: 'apple' });
+ expect(AssistantStream.accumulateDelta({ a: null }, { a: null })).toEqual({ a: null });
+ });
+});
From 993669b502416096e4fa3b9f300bf0746ecbec63 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 13 Mar 2024 16:31:06 -0400
Subject: [PATCH 024/533] release: 4.29.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 8 ++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 2813cb972..2f6cf24a7 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.28.5"
+ ".": "4.29.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8798e4b66..19dfcc620 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 4.29.0 (2024-03-13)
+
+Full Changelog: [v4.28.5...v4.29.0](https://github.com/openai/openai-node/compare/v4.28.5...v4.29.0)
+
+### Features
+
+* **assistants:** add support for streaming ([#714](https://github.com/openai/openai-node/issues/714)) ([7d27d28](https://github.com/openai/openai-node/commit/7d27d286876d0a575d91a4752f401126fe93d2a3))
+
## 4.28.5 (2024-03-13)
Full Changelog: [v4.28.4...v4.28.5](https://github.com/openai/openai-node/compare/v4.28.4...v4.28.5)
diff --git a/README.md b/README.md
index 24d38ac79..93ae9f044 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.28.5/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.29.0/mod.ts';
```
diff --git a/build-deno b/build-deno
index fb739cc50..c49755fda 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.28.5/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.29.0/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index d51c4ca96..ce6396c2e 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.28.5",
+ "version": "4.29.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 516e764d1..0de2f3538 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.28.5'; // x-release-please-version
+export const VERSION = '4.29.0'; // x-release-please-version
From bc9a1ca308020a88c29d409edc06cdfca8cbf8f5 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 15 Mar 2024 13:31:36 -0400
Subject: [PATCH 025/533] docs(readme): assistant streaming (#719)
---
helpers.md | 158 ++++++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 145 insertions(+), 13 deletions(-)
diff --git a/helpers.md b/helpers.md
index 76423ee07..9f01a126a 100644
--- a/helpers.md
+++ b/helpers.md
@@ -1,6 +1,140 @@
-# Chat Completion Helpers
+# Streaming Helpers
-## Streaming Responses
+OpenAI supports streaming responses when interacting with the [Chat](#chat-streaming) or [Assistant](#assistant-streaming-api) APIs.
+
+## Assistant Streaming API
+
+OpenAI supports streaming responses from Assistants. The SDK provides convenience wrappers around the API
+so you can subscribe to the types of events you are interested in as well as receive accumulated responses.
+
+More information can be found in the documentation: [Assistant Streaming](https://platform.openai.com/docs/assistants/overview?lang=node.js)
+
+#### An example of creating a run and subscribing to some events
+
+```ts
+const run = openai.beta.threads.runs
+ .createAndStream(thread.id, {
+ assistant_id: assistant.id,
+ })
+ .on('textCreated', (text) => process.stdout.write('\nassistant > '))
+ .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value))
+ .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`))
+ .on('toolCallDelta', (toolCallDelta, snapshot) => {
+ if (toolCallDelta.type === 'code_interpreter') {
+ if (toolCallDelta.code_interpreter.input) {
+ process.stdout.write(toolCallDelta.code_interpreter.input);
+ }
+ if (toolCallDelta.code_interpreter.outputs) {
+ process.stdout.write('\noutput >\n');
+ toolCallDelta.code_interpreter.outputs.forEach((output) => {
+ if (output.type === 'logs') {
+ process.stdout.write(`\n${output.logs}\n`);
+ }
+ });
+ }
+ }
+ });
+```
+
+### Assistant Events
+
+The assistant API provides events you can subscribe to for the following events.
+
+```ts
+.on('event', (event: AssistantStreamEvent) => ...)
+```
+
+This allows you to subscribe to all the possible raw events sent by the OpenAI streaming API.
+In many cases it will be more convenient to subscribe to a more specific set of events for your use case.
+
+More information on the types of events can be found here: [Events](https://platform.openai.com/docs/api-reference/assistants-streaming/events)
+
+```ts
+.on('runStepCreated', (runStep: RunStep) => ...)
+.on('runStepDelta', (delta: RunStepDelta, snapshot: RunStep) => ...)
+.on('runStepDone', (runStep: RunStep) => ...)
+```
+
+These events allow you to subscribe to the creation, delta and completion of a RunStep.
+
+For more information on how Runs and RunSteps work see the documentation [Runs and RunSteps](https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps)
+
+```ts
+.on('messageCreated', (message: Message) => ...)
+.on('messageDelta', (delta: MessageDelta, snapshot: Message) => ...)
+.on('messageDone', (message: Message) => ...)
+```
+
+This allows you to subscribe to Message creation, delta and completion events. Messages can contain
+different types of content that can be sent from a model (and events are available for specific content types).
+For convenience, the delta event includes both the incremental update and an accumulated snapshot of the content.
+
+More information on messages can be found
+on in the documentation page [Message](https://platform.openai.com/docs/api-reference/messages/object).
+
+```ts
+.on('textCreated', (content: Text) => ...)
+.on('textDelta', (delta: RunStepDelta, snapshot: Text) => ...)
+.on('textDone', (content: Text, snapshot: Message) => ...)
+```
+
+These events allow you to subscribe to the creation, delta and completion of a Text content (a specific type of message).
+For convenience, the delta event includes both the incremental update and an accumulated snapshot of the content.
+
+```ts
+.on('imageFileDone', (content: ImageFile, snapshot: Message) => ...)
+```
+
+Image files are not sent incrementally so an event is provided for when a image file is available.
+
+```ts
+.on('toolCallCreated', (toolCall: ToolCall) => ...)
+.on('toolCallDelta', (delta: RunStepDelta, snapshot: ToolCall) => ...)
+.on('toolCallDone', (toolCall: ToolCall) => ...)
+```
+
+These events allow you to subscribe to events for the creation, delta and completion of a ToolCall.
+
+More information on tools can be found here [Tools](https://platform.openai.com/docs/assistants/tools)
+
+```ts
+.on('end', () => ...)
+```
+
+The last event send when a stream ends.
+
+### Assistant Methods
+
+The assistant streaming object also provides a few methods for convenience:
+
+```ts
+.currentEvent()
+
+.currentRun()
+
+.currentMessageSnapshot()
+
+.currentRunStepSnapshot()
+```
+
+These methods are provided to allow you to access additional context from within event handlers. In many cases
+the handlers should include all the information you need for processing, but if additional context is required it
+can be accessed.
+
+Note: There is not always a relevant context in certain situations (these will be undefined in those cases).
+
+```ts
+await.finalMessages();
+
+await.finalRunSteps();
+```
+
+These methods are provided for convenience to collect information at the end of a stream. Calling these events
+will trigger consumption of the stream until completion and then return the relevant accumulated objects.
+
+## Chat Streaming
+
+### Streaming Responses
```ts
openai.chat.completions.stream({ stream?: false, … }, options?): ChatCompletionStreamingRunner
@@ -18,7 +152,7 @@ If you need to cancel a stream, you can `break` from a `for await` loop or call
See an example of streaming helpers in action in [`examples/stream.ts`](examples/stream.ts).
-## Automated Function Calls
+### Automated Function Calls
```ts
openai.chat.completions.runTools({ stream: false, … }, options?): ChatCompletionRunner
@@ -69,9 +203,7 @@ See an example of automated function calls in action in
Note, `runFunctions` was also previously available, but has been deprecated in favor of `runTools`.
-## Runner API
-
-### Events
+### Chat Events
#### `.on('connect', () => …)`
@@ -148,7 +280,7 @@ The event fired at the end, returning the total usage of the call.
The last event fired in the stream.
-### Methods
+### Chat Methods
#### `.abort()`
@@ -190,7 +322,7 @@ A promise which resolves with the last message with a `role: "function"`. Throws
A promise which resolves with the total usage.
-### Fields
+### Chat Fields
#### `.messages`
@@ -200,9 +332,9 @@ A mutable array of all messages in the conversation.
The underlying `AbortController` for the runner.
-## Examples
+### Chat Examples
-### Abort on a function call
+#### Abort on a function call
If you have a function call flow which you intend to _end_ with a certain function call, then you can use the second
argument `runner` given to the function to either mutate `runner.messages` or call `runner.abort()`.
@@ -238,7 +370,7 @@ async function main() {
main();
```
-### Integrate with `zod`
+#### Integrate with `zod`
[`zod`](https://www.npmjs.com/package/zod) is a schema validation library which can help with validating the
assistant's response to make sure it conforms to a schema. Paired with [`zod-to-json-schema`](https://www.npmjs.com/package/zod-to-json-schema), the validation schema also acts as the `parameters` JSON Schema passed to the API.
@@ -287,10 +419,10 @@ main();
See a more fully-fledged example in [`examples/function-call-helpers-zod.ts`](examples/function-call-helpers-zod.ts).
-### Integrate with Next.JS
+#### Integrate with Next.JS
See an example of a Next.JS integration here [`examples/stream-to-client-next.ts`](examples/stream-to-client-next.ts).
-### Proxy Streaming to a Browser
+#### Proxy Streaming to a Browser
See an example of using express to stream to a browser here [`examples/stream-to-client-express.ts`](examples/stream-to-client-express.ts).
From 4760ccc4be8b0951414eb443d186d3c506731195 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 15 Mar 2024 13:31:56 -0400
Subject: [PATCH 026/533] release: 4.29.1
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 8 ++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 2f6cf24a7..ded4849c4 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.29.0"
+ ".": "4.29.1"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 19dfcc620..741a701b3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 4.29.1 (2024-03-15)
+
+Full Changelog: [v4.29.0...v4.29.1](https://github.com/openai/openai-node/compare/v4.29.0...v4.29.1)
+
+### Documentation
+
+* **readme:** assistant streaming ([#719](https://github.com/openai/openai-node/issues/719)) ([bc9a1ca](https://github.com/openai/openai-node/commit/bc9a1ca308020a88c29d409edc06cdfca8cbf8f5))
+
## 4.29.0 (2024-03-13)
Full Changelog: [v4.28.5...v4.29.0](https://github.com/openai/openai-node/compare/v4.28.5...v4.29.0)
diff --git a/README.md b/README.md
index 93ae9f044..68d337e81 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.29.0/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.29.1/mod.ts';
```
diff --git a/build-deno b/build-deno
index c49755fda..c08e26dbc 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.29.0/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.29.1/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index ce6396c2e..8a6398765 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.29.0",
+ "version": "4.29.1",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 0de2f3538..8acef11c3 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.29.0'; // x-release-please-version
+export const VERSION = '4.29.1'; // x-release-please-version
From 05ff8f7671fe6ce5d9517034f76a166a0bd27803 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 18 Mar 2024 22:27:15 -0400
Subject: [PATCH 027/533] docs: fix typo in CONTRIBUTING.md (#722)
---
CONTRIBUTING.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d9e64025d..9e8f669a7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -68,7 +68,7 @@ pnpm link -—global openai
Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
```bash
-npx prism path/to/your/openapi.yml
+npx prism mock path/to/your/openapi.yml
```
```bash
From 139e205ed1ed30cb1df982d852a093dcea945aba Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 19 Mar 2024 06:42:30 -0400
Subject: [PATCH 028/533] chore(internal): update generated pragma comment
(#724)
---
src/error.ts | 2 +-
src/index.ts | 2 +-
src/pagination.ts | 2 +-
src/resource.ts | 2 +-
src/resources/audio/audio.ts | 2 +-
src/resources/audio/index.ts | 2 +-
src/resources/audio/speech.ts | 2 +-
src/resources/audio/transcriptions.ts | 2 +-
src/resources/audio/translations.ts | 2 +-
src/resources/beta/assistants/assistants.ts | 2 +-
src/resources/beta/assistants/files.ts | 2 +-
src/resources/beta/assistants/index.ts | 2 +-
src/resources/beta/beta.ts | 2 +-
src/resources/beta/chat/chat.ts | 2 +-
src/resources/beta/chat/completions.ts | 2 +-
src/resources/beta/chat/index.ts | 2 +-
src/resources/beta/index.ts | 2 +-
src/resources/beta/threads/index.ts | 2 +-
src/resources/beta/threads/messages/files.ts | 2 +-
src/resources/beta/threads/messages/index.ts | 2 +-
src/resources/beta/threads/messages/messages.ts | 2 +-
src/resources/beta/threads/runs/index.ts | 2 +-
src/resources/beta/threads/runs/runs.ts | 2 +-
src/resources/beta/threads/runs/steps.ts | 2 +-
src/resources/beta/threads/threads.ts | 2 +-
src/resources/chat/chat.ts | 2 +-
src/resources/chat/completions.ts | 2 +-
src/resources/chat/index.ts | 2 +-
src/resources/completions.ts | 2 +-
src/resources/embeddings.ts | 2 +-
src/resources/files.ts | 2 +-
src/resources/fine-tuning/fine-tuning.ts | 2 +-
src/resources/fine-tuning/index.ts | 2 +-
src/resources/fine-tuning/jobs.ts | 2 +-
src/resources/images.ts | 2 +-
src/resources/index.ts | 2 +-
src/resources/models.ts | 2 +-
src/resources/moderations.ts | 2 +-
src/resources/shared.ts | 2 +-
tests/api-resources/audio/speech.test.ts | 2 +-
tests/api-resources/audio/transcriptions.test.ts | 2 +-
tests/api-resources/audio/translations.test.ts | 2 +-
tests/api-resources/beta/assistants/assistants.test.ts | 2 +-
tests/api-resources/beta/assistants/files.test.ts | 2 +-
tests/api-resources/beta/threads/messages/files.test.ts | 2 +-
tests/api-resources/beta/threads/messages/messages.test.ts | 2 +-
tests/api-resources/beta/threads/runs/runs.test.ts | 2 +-
tests/api-resources/beta/threads/runs/steps.test.ts | 2 +-
tests/api-resources/beta/threads/threads.test.ts | 2 +-
tests/api-resources/chat/completions.test.ts | 2 +-
tests/api-resources/completions.test.ts | 2 +-
tests/api-resources/embeddings.test.ts | 2 +-
tests/api-resources/files.test.ts | 2 +-
tests/api-resources/fine-tuning/jobs.test.ts | 2 +-
tests/api-resources/images.test.ts | 2 +-
tests/api-resources/models.test.ts | 2 +-
tests/api-resources/moderations.test.ts | 2 +-
tests/index.test.ts | 2 +-
58 files changed, 58 insertions(+), 58 deletions(-)
diff --git a/src/error.ts b/src/error.ts
index fd7477ad2..deac34c5d 100644
--- a/src/error.ts
+++ b/src/error.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { castToError, Headers } from './core';
diff --git a/src/index.ts b/src/index.ts
index 7b3033fa9..9a2b2eaad 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from './core';
import * as Errors from './error';
diff --git a/src/pagination.ts b/src/pagination.ts
index 5d890a140..63644e333 100644
--- a/src/pagination.ts
+++ b/src/pagination.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { AbstractPage, Response, APIClient, FinalRequestOptions, PageInfo } from './core';
diff --git a/src/resource.ts b/src/resource.ts
index 0bf87cf33..87847c879 100644
--- a/src/resource.ts
+++ b/src/resource.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import type { OpenAI } from './index';
diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts
index 960577b0d..f3fcba4c3 100644
--- a/src/resources/audio/audio.ts
+++ b/src/resources/audio/audio.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from 'openai/resource';
import * as SpeechAPI from 'openai/resources/audio/speech';
diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts
index 17c81d3bb..31732a267 100644
--- a/src/resources/audio/index.ts
+++ b/src/resources/audio/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export { Audio } from './audio';
export { SpeechCreateParams, Speech } from './speech';
diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts
index 7d0ee2195..4b83bae3e 100644
--- a/src/resources/audio/speech.ts
+++ b/src/resources/audio/speech.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts
index ab2079ed6..f01e8556d 100644
--- a/src/resources/audio/transcriptions.ts
+++ b/src/resources/audio/transcriptions.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts
index e68a714fb..234933236 100644
--- a/src/resources/audio/translations.ts
+++ b/src/resources/audio/translations.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants/assistants.ts
index b4e92fd92..1e8ca6ee9 100644
--- a/src/resources/beta/assistants/assistants.ts
+++ b/src/resources/beta/assistants/assistants.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/beta/assistants/files.ts b/src/resources/beta/assistants/files.ts
index 7de700e50..51fd0c0d8 100644
--- a/src/resources/beta/assistants/files.ts
+++ b/src/resources/beta/assistants/files.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/beta/assistants/index.ts b/src/resources/beta/assistants/index.ts
index 0ae8c9c67..c191d338b 100644
--- a/src/resources/beta/assistants/index.ts
+++ b/src/resources/beta/assistants/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export {
Assistant,
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index 74056ed1d..43ee8c7e7 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from 'openai/resource';
import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
diff --git a/src/resources/beta/chat/chat.ts b/src/resources/beta/chat/chat.ts
index a9cadc681..2b4a7a404 100644
--- a/src/resources/beta/chat/chat.ts
+++ b/src/resources/beta/chat/chat.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from 'openai/resource';
import * as CompletionsAPI from 'openai/resources/beta/chat/completions';
diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts
index e7f89f5cf..95fd0ac79 100644
--- a/src/resources/beta/chat/completions.ts
+++ b/src/resources/beta/chat/completions.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/beta/chat/index.ts b/src/resources/beta/chat/index.ts
index 8d0ee40ae..23b1b8ff3 100644
--- a/src/resources/beta/chat/index.ts
+++ b/src/resources/beta/chat/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export { Chat } from './chat';
export { Completions } from './completions';
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
index d8770c29a..7f35730fb 100644
--- a/src/resources/beta/index.ts
+++ b/src/resources/beta/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export {
Assistant,
diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts
index 3585be846..097a52819 100644
--- a/src/resources/beta/threads/index.ts
+++ b/src/resources/beta/threads/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export {
Annotation,
diff --git a/src/resources/beta/threads/messages/files.ts b/src/resources/beta/threads/messages/files.ts
index 72c01bb97..994b09d5f 100644
--- a/src/resources/beta/threads/messages/files.ts
+++ b/src/resources/beta/threads/messages/files.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/beta/threads/messages/index.ts b/src/resources/beta/threads/messages/index.ts
index f68edbbd4..ef446d012 100644
--- a/src/resources/beta/threads/messages/index.ts
+++ b/src/resources/beta/threads/messages/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export {
Annotation,
diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts
index b38a4bbf0..a2f2aaf1c 100644
--- a/src/resources/beta/threads/messages/messages.ts
+++ b/src/resources/beta/threads/messages/messages.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts
index 7fa34637a..636b5d850 100644
--- a/src/resources/beta/threads/runs/index.ts
+++ b/src/resources/beta/threads/runs/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export {
CodeInterpreterLogs,
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 8fe09ecc6..a28dd9ae9 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIPromise } from 'openai/core';
diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts
index 4218e9769..f0816fdb2 100644
--- a/src/resources/beta/threads/runs/steps.ts
+++ b/src/resources/beta/threads/runs/steps.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index cbde41f89..266f6709e 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIPromise } from 'openai/core';
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index 07c7700dc..6c7bccb22 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from 'openai/resource';
import * as CompletionsAPI from 'openai/resources/chat/completions';
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 41216a8e3..8119639f2 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIPromise } from 'openai/core';
diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts
index b8b69e453..78a7516ed 100644
--- a/src/resources/chat/index.ts
+++ b/src/resources/chat/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export { Chat } from './chat';
export {
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 83ecb3e99..b64c3a166 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIPromise } from 'openai/core';
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
index 3f59d2a7c..208ceb240 100644
--- a/src/resources/embeddings.ts
+++ b/src/resources/embeddings.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/files.ts b/src/resources/files.ts
index cda487a63..820c7a1fa 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts
index 5d2d27ac3..e62f8f09c 100644
--- a/src/resources/fine-tuning/fine-tuning.ts
+++ b/src/resources/fine-tuning/fine-tuning.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from 'openai/resource';
import * as JobsAPI from 'openai/resources/fine-tuning/jobs';
diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts
index c2cac49ac..2885f62f4 100644
--- a/src/resources/fine-tuning/index.ts
+++ b/src/resources/fine-tuning/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export { FineTuning } from './fine-tuning';
export {
diff --git a/src/resources/fine-tuning/jobs.ts b/src/resources/fine-tuning/jobs.ts
index 7bc216d7c..eb77405ca 100644
--- a/src/resources/fine-tuning/jobs.ts
+++ b/src/resources/fine-tuning/jobs.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/images.ts b/src/resources/images.ts
index bc5b9edc0..95f0b6ff2 100644
--- a/src/resources/images.ts
+++ b/src/resources/images.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 16ce85123..a9741f5fd 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export * from './chat/index';
export * from './shared';
diff --git a/src/resources/models.ts b/src/resources/models.ts
index 6c6c3379c..4d5bc57e9 100644
--- a/src/resources/models.ts
+++ b/src/resources/models.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
index a43006ccf..b9b9d7fc6 100644
--- a/src/resources/moderations.ts
+++ b/src/resources/moderations.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index a6b2c11bd..93fa05fa4 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export interface ErrorObject {
code: string | null;
diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts
index b0cf1a71c..18302ce9a 100644
--- a/tests/api-resources/audio/speech.test.ts
+++ b/tests/api-resources/audio/speech.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts
index 33652af53..3fc4ca22b 100644
--- a/tests/api-resources/audio/transcriptions.test.ts
+++ b/tests/api-resources/audio/transcriptions.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI, { toFile } from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/audio/translations.test.ts b/tests/api-resources/audio/translations.test.ts
index 723625f6e..0853bedfb 100644
--- a/tests/api-resources/audio/translations.test.ts
+++ b/tests/api-resources/audio/translations.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI, { toFile } from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/beta/assistants/assistants.test.ts b/tests/api-resources/beta/assistants/assistants.test.ts
index 60ca0a6e2..b11075d06 100644
--- a/tests/api-resources/beta/assistants/assistants.test.ts
+++ b/tests/api-resources/beta/assistants/assistants.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/beta/assistants/files.test.ts b/tests/api-resources/beta/assistants/files.test.ts
index 8db328442..e285b4664 100644
--- a/tests/api-resources/beta/assistants/files.test.ts
+++ b/tests/api-resources/beta/assistants/files.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/beta/threads/messages/files.test.ts b/tests/api-resources/beta/threads/messages/files.test.ts
index b4a00a868..58c8813fe 100644
--- a/tests/api-resources/beta/threads/messages/files.test.ts
+++ b/tests/api-resources/beta/threads/messages/files.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/beta/threads/messages/messages.test.ts b/tests/api-resources/beta/threads/messages/messages.test.ts
index 35538efb9..3a80bfe1e 100644
--- a/tests/api-resources/beta/threads/messages/messages.test.ts
+++ b/tests/api-resources/beta/threads/messages/messages.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
index 45f17040a..5e1b363fd 100644
--- a/tests/api-resources/beta/threads/runs/runs.test.ts
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/beta/threads/runs/steps.test.ts b/tests/api-resources/beta/threads/runs/steps.test.ts
index 76eec269a..76495a1a3 100644
--- a/tests/api-resources/beta/threads/runs/steps.test.ts
+++ b/tests/api-resources/beta/threads/runs/steps.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts
index 9243dc11c..24cb815a7 100644
--- a/tests/api-resources/beta/threads/threads.test.ts
+++ b/tests/api-resources/beta/threads/threads.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts
index 49f3562b0..e0ccb3910 100644
--- a/tests/api-resources/chat/completions.test.ts
+++ b/tests/api-resources/chat/completions.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts
index 85fc68498..2641bf7e3 100644
--- a/tests/api-resources/completions.test.ts
+++ b/tests/api-resources/completions.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts
index bcb5ffbba..d4e1f3240 100644
--- a/tests/api-resources/embeddings.test.ts
+++ b/tests/api-resources/embeddings.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts
index 9e6373aba..514f42e3a 100644
--- a/tests/api-resources/files.test.ts
+++ b/tests/api-resources/files.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI, { toFile } from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/fine-tuning/jobs.test.ts b/tests/api-resources/fine-tuning/jobs.test.ts
index 22f457303..d8f230abd 100644
--- a/tests/api-resources/fine-tuning/jobs.test.ts
+++ b/tests/api-resources/fine-tuning/jobs.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts
index 418a55eb0..33d633a63 100644
--- a/tests/api-resources/images.test.ts
+++ b/tests/api-resources/images.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI, { toFile } from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/models.test.ts b/tests/api-resources/models.test.ts
index 91eb0d055..ca1f98365 100644
--- a/tests/api-resources/models.test.ts
+++ b/tests/api-resources/models.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts
index ad315df5d..ef7298fa9 100644
--- a/tests/api-resources/moderations.test.ts
+++ b/tests/api-resources/moderations.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { Response } from 'node-fetch';
diff --git a/tests/index.test.ts b/tests/index.test.ts
index 3fb42a80a..cd5f2a0a9 100644
--- a/tests/index.test.ts
+++ b/tests/index.test.ts
@@ -1,4 +1,4 @@
-// File generated from our OpenAPI spec by Stainless.
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import OpenAI from 'openai';
import { APIUserAbortError } from 'openai';
From 6a2c41b0ce833eba0cdea6a7d221697f3be26abb Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 19 Mar 2024 10:13:42 -0400
Subject: [PATCH 029/533] docs: assistant improvements (#725)
---
README.md | 31 +++++++++++++++++++++++++++++++
helpers.md | 37 ++++++++++++++++++++++++++++++-------
2 files changed, 61 insertions(+), 7 deletions(-)
diff --git a/README.md b/README.md
index 68d337e81..1eca06c85 100644
--- a/README.md
+++ b/README.md
@@ -100,6 +100,37 @@ Documentation for each method, request param, and response field are available i
> [!IMPORTANT]
> Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217).
+### Streaming Helpers
+
+The SDK also includes helpers to process streams and handle the incoming events.
+
+```ts
+const run = openai.beta.threads.runs
+ .createAndStream(thread.id, {
+ assistant_id: assistant.id,
+ })
+ .on('textCreated', (text) => process.stdout.write('\nassistant > '))
+ .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value))
+ .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`))
+ .on('toolCallDelta', (toolCallDelta, snapshot) => {
+ if (toolCallDelta.type === 'code_interpreter') {
+ if (toolCallDelta.code_interpreter.input) {
+ process.stdout.write(toolCallDelta.code_interpreter.input);
+ }
+ if (toolCallDelta.code_interpreter.outputs) {
+ process.stdout.write('\noutput >\n');
+ toolCallDelta.code_interpreter.outputs.forEach((output) => {
+ if (output.type === 'logs') {
+ process.stdout.write(`\n${output.logs}\n`);
+ }
+ });
+ }
+ }
+ });
+```
+
+More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md)
+
### Streaming responses
This library provides several conveniences for streaming chat completions, for example:
diff --git a/helpers.md b/helpers.md
index 9f01a126a..9a94a618e 100644
--- a/helpers.md
+++ b/helpers.md
@@ -36,6 +36,29 @@ const run = openai.beta.threads.runs
});
```
+### Starting a stream
+
+There are three helper methods for creating streams:
+
+```ts
+openai.beta.threads.runs.createAndStream();
+```
+
+This method can be used to start and stream the response to an existing run with an associated thread
+that is already populated with messages.
+
+```ts
+openai.beta.threads.createAndRunStream();
+```
+
+This method can be used to add a message to a thread, start a run and then stream the response.
+
+```ts
+openai.beta.threads.runs.submitToolOutputsStream();
+```
+
+This method can be used to submit a tool output to a run waiting on the output and start a stream.
+
### Assistant Events
The assistant API provides events you can subscribe to for the following events.
@@ -108,25 +131,25 @@ The last event send when a stream ends.
The assistant streaming object also provides a few methods for convenience:
```ts
-.currentEvent()
+.currentEvent(): AssistantStreamEvent | undefined
-.currentRun()
+.currentRun(): Run | undefined
-.currentMessageSnapshot()
+.currentMessageSnapshot(): Message
-.currentRunStepSnapshot()
+.currentRunStepSnapshot(): Runs.RunStep
```
These methods are provided to allow you to access additional context from within event handlers. In many cases
the handlers should include all the information you need for processing, but if additional context is required it
can be accessed.
-Note: There is not always a relevant context in certain situations (these will be undefined in those cases).
+Note: There is not always a relevant context in certain situations (these will be `undefined` in those cases).
```ts
-await.finalMessages();
+await .finalMessages() : Promise
-await.finalRunSteps();
+await .finalRunSteps(): Promise
```
These methods are provided for convenience to collect information at the end of a stream. Calling these events
From dda3f6890cf6a6b8f885a6470240b3036eab3b09 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 19 Mar 2024 10:14:03 -0400
Subject: [PATCH 030/533] release: 4.29.2
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 14 ++++++++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index ded4849c4..fc4efb3a0 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.29.1"
+ ".": "4.29.2"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 741a701b3..497a341af 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## 4.29.2 (2024-03-19)
+
+Full Changelog: [v4.29.1...v4.29.2](https://github.com/openai/openai-node/compare/v4.29.1...v4.29.2)
+
+### Chores
+
+* **internal:** update generated pragma comment ([#724](https://github.com/openai/openai-node/issues/724)) ([139e205](https://github.com/openai/openai-node/commit/139e205ed1ed30cb1df982d852a093dcea945aba))
+
+
+### Documentation
+
+* assistant improvements ([#725](https://github.com/openai/openai-node/issues/725)) ([6a2c41b](https://github.com/openai/openai-node/commit/6a2c41b0ce833eba0cdea6a7d221697f3be26abb))
+* fix typo in CONTRIBUTING.md ([#722](https://github.com/openai/openai-node/issues/722)) ([05ff8f7](https://github.com/openai/openai-node/commit/05ff8f7671fe6ce5d9517034f76a166a0bd27803))
+
## 4.29.1 (2024-03-15)
Full Changelog: [v4.29.0...v4.29.1](https://github.com/openai/openai-node/compare/v4.29.0...v4.29.1)
diff --git a/README.md b/README.md
index 1eca06c85..9699fca42 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.29.1/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.29.2/mod.ts';
```
diff --git a/build-deno b/build-deno
index c08e26dbc..25569475f 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.29.1/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.29.2/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 8a6398765..25994c236 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.29.1",
+ "version": "4.29.2",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 8acef11c3..a9177ff54 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.29.1'; // x-release-please-version
+export const VERSION = '4.29.2'; // x-release-please-version
From 7d87199f5245e9c5a4ebee34e15838ae5ce47100 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 19 Mar 2024 19:00:03 -0400
Subject: [PATCH 031/533] fix(internal): make toFile use input file's options
(#727)
---
src/uploads.ts | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/uploads.ts b/src/uploads.ts
index 2398baf35..081827c9a 100644
--- a/src/uploads.ts
+++ b/src/uploads.ts
@@ -102,11 +102,14 @@ export type ToFileInput = Uploadable | Exclude | AsyncIter
export async function toFile(
value: ToFileInput | PromiseLike,
name?: string | null | undefined,
- options: FilePropertyBag | undefined = {},
+ options?: FilePropertyBag | undefined,
): Promise {
// If it's a promise, resolve it.
value = await value;
+ // Use the file's options if there isn't one provided
+ options ??= isFileLike(value) ? { lastModified: value.lastModified, type: value.type } : {};
+
if (isResponseLike(value)) {
const blob = await value.blob();
name ||= new URL(value.url).pathname.split(/[\\/]/).pop() ?? 'unknown_file';
From 3c59fa750cf25fc65395482794b8c3b90f826674 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 20 Mar 2024 16:01:47 -0400
Subject: [PATCH 032/533] docs(readme): consistent use of sentence case in
headings (#729)
---
README.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/README.md b/README.md
index 9699fca42..a7bf62f94 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ async function main() {
main();
```
-## Streaming Responses
+## Streaming responses
We provide support for streaming responses using Server Sent Events (SSE).
@@ -256,7 +256,7 @@ Note that `runFunctions` was previously available as well, but has been deprecat
Read more about various examples such as with integrating with [zod](helpers.md#integrate-with-zod),
[next.js](helpers.md#integrate-wtih-next-js), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser).
-## File Uploads
+## File uploads
Request parameters that correspond to file uploads can be passed in many different forms:
@@ -497,7 +497,7 @@ await openai.models.list({
});
```
-## Semantic Versioning
+## Semantic versioning
This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
From a7cc3e15bf2ed64bf02a559d2956a3f89f43e5ff Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 20 Mar 2024 23:26:10 -0400
Subject: [PATCH 033/533] docs(readme): document how to make undocumented
requests (#730)
---
README.md | 50 ++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 48 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index a7bf62f94..6676cba0d 100644
--- a/README.md
+++ b/README.md
@@ -437,7 +437,51 @@ console.log(raw.headers.get('X-My-Header'));
console.log(chatCompletion);
```
-## Customizing the fetch client
+### Making custom/undocumented requests
+
+This library is typed for convenient access to the documented API. If you need to access undocumented
+endpoints, params, or response properties, the library can still be used.
+
+#### Undocumented endpoints
+
+To make requests to undocumented endpoints, you can use `client.get`, `client.post`, and other HTTP verbs.
+Options on the client, such as retries, will be respected when making these requests.
+
+```ts
+await client.post('/some/path', {
+ body: { some_prop: 'foo' },
+ query: { some_query_arg: 'bar' },
+});
+```
+
+#### Undocumented params
+
+To make requests using undocumented parameters, you may use `// @ts-expect-error` on the undocumented
+parameter. This library doesn't validate at runtime that the request matches the type, so any extra values you
+send will be sent as-is.
+
+```ts
+client.foo.create({
+ foo: 'my_param',
+ bar: 12,
+ // @ts-expect-error baz is not yet public
+ baz: 'undocumented option',
+});
+```
+
+For requests with the `GET` verb, any extra params will be in the query, all other requests will send the
+extra param in the body.
+
+If you want to explicitly send an extra argument, you can do so with the `query`, `body`, and `headers` request
+options.
+
+#### Undocumented properties
+
+To access undocumented response properties, you may access the response object with `// @ts-expect-error` on
+the response object, or cast the response object to the requisite type. Like the request params, we do not
+validate or strip extra properties from the response from the API.
+
+### Customizing the fetch client
By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments.
@@ -455,6 +499,8 @@ import OpenAI from 'openai';
To do the inverse, add `import "openai/shims/node"` (which does import polyfills).
This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/openai/openai-node/tree/master/src/_shims#readme)).
+### Logging and middleware
+
You may also provide a custom `fetch` function when instantiating the client,
which can be used to inspect or alter the `Request` or `Response` before/after each request:
@@ -475,7 +521,7 @@ const client = new OpenAI({
Note that if given a `DEBUG=true` environment variable, this library will log all requests and responses automatically.
This is intended for debugging purposes only and may change in the future without notice.
-## Configuring an HTTP(S) Agent (e.g., for proxies)
+### Configuring an HTTP(S) Agent (e.g., for proxies)
By default, this library uses a stable agent for all http/https requests to reuse TCP connections, eliminating many TCP & TLS handshakes and shaving around 100ms off most requests.
From 1b5f9027728341061ec40b32e1010928db5253fc Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 21 Mar 2024 11:55:15 -0400
Subject: [PATCH 034/533] fix: handle process.env being undefined in debug func
(#733)
---
src/core.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/core.ts b/src/core.ts
index a94251808..4364c7a3c 100644
--- a/src/core.ts
+++ b/src/core.ts
@@ -1075,7 +1075,7 @@ function applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void {
}
export function debug(action: string, ...args: any[]) {
- if (typeof process !== 'undefined' && process.env['DEBUG'] === 'true') {
+ if (typeof process !== 'undefined' && process?.env?.['DEBUG'] === 'true') {
console.log(`OpenAI:DEBUG:${action}`, ...args);
}
}
From f2925e54f32f972ab439d4a6d36a422ec56524c3 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 25 Mar 2024 07:30:00 -0400
Subject: [PATCH 035/533] fix(client): correctly send deno version header
(#736)
---
src/core.ts | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/core.ts b/src/core.ts
index 4364c7a3c..39fe0f97f 100644
--- a/src/core.ts
+++ b/src/core.ts
@@ -818,7 +818,8 @@ const getPlatformProperties = (): PlatformProperties => {
'X-Stainless-OS': normalizePlatform(Deno.build.os),
'X-Stainless-Arch': normalizeArch(Deno.build.arch),
'X-Stainless-Runtime': 'deno',
- 'X-Stainless-Runtime-Version': Deno.version,
+ 'X-Stainless-Runtime-Version':
+ typeof Deno.version === 'string' ? Deno.version : Deno.version?.deno ?? 'unknown',
};
}
if (typeof EdgeRuntime !== 'undefined') {
From 1b1d357314d9b1995c9787fec9fa8514fd384886 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 26 Mar 2024 19:08:47 -0400
Subject: [PATCH 036/533] chore(internal): add type (#737)
---
src/streaming.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/streaming.ts b/src/streaming.ts
index c452737aa..6b0f2a345 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -201,7 +201,7 @@ export class Stream
- implements AsyncIterable
- {
async start() {
iter = self[Symbol.asyncIterator]();
},
- async pull(ctrl) {
+ async pull(ctrl: any) {
try {
const { value, done } = await iter.next();
if (done) return ctrl.close();
From 3dcaa345a7395e80cb91f32c5b2361a5dd8d1222 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 27 Mar 2024 19:51:22 +0000
Subject: [PATCH 037/533] feat: assistant fromReadableStream (#738)
---
src/lib/AssistantStream.ts | 28 +++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts
index d70cb7358..c0a176db5 100644
--- a/src/lib/AssistantStream.ts
+++ b/src/lib/AssistantStream.ts
@@ -158,6 +158,32 @@ export class AssistantStream
};
}
+ static fromReadableStream(stream: ReadableStream): AssistantStream {
+ const runner = new AssistantStream();
+ runner._run(() => runner._fromReadableStream(stream));
+ return runner;
+ }
+
+ protected async _fromReadableStream(
+ readableStream: ReadableStream,
+ options?: Core.RequestOptions,
+ ): Promise {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+ this._connected();
+ const stream = Stream.fromReadableStream(readableStream, this.controller);
+ for await (const event of stream) {
+ this.#handleEvent(event);
+ }
+ if (stream.controller.signal?.aborted) {
+ throw new APIUserAbortError();
+ }
+ return this._addRun(this.#endRequest());
+ }
+
toReadableStream(): ReadableStream {
const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller);
return stream.toReadableStream();
@@ -385,7 +411,7 @@ export class AssistantStream
throw new OpenAIError(`stream has ended, this shouldn't happen`);
}
- if (!this.#finalRun) throw Error('Final run has been been received');
+ if (!this.#finalRun) throw Error('Final run has not been received');
return this.#finalRun;
}
From 237388533476b8b34fbda7ce5fbb9b466dae9c3c Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 27 Mar 2024 20:38:46 +0000
Subject: [PATCH 038/533] fix(example): correcting example (#739)
---
examples/assistant-stream-raw.ts | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/examples/assistant-stream-raw.ts b/examples/assistant-stream-raw.ts
index a882d219a..399064807 100644
--- a/examples/assistant-stream-raw.ts
+++ b/examples/assistant-stream-raw.ts
@@ -1,3 +1,5 @@
+#!/usr/bin/env -S npm run tsn -T
+
import OpenAI from 'openai';
const openai = new OpenAI();
@@ -27,7 +29,7 @@ async function main() {
for await (const event of stream) {
if (event.event === 'thread.message.delta') {
const chunk = event.data.delta.content?.[0];
- if (chunk && 'text' in chunk) {
+ if (chunk && 'text' in chunk && chunk.text.value) {
process.stdout.write(chunk.text.value);
}
}
From 540d9ca9b9d84df6987fdedf640c2fa761417f2e Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 28 Mar 2024 05:06:39 +0000
Subject: [PATCH 039/533] release: 4.30.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 27 +++++++++++++++++++++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 32 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index fc4efb3a0..1e5205f3f 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.29.2"
+ ".": "4.30.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 497a341af..bbc1785dc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,32 @@
# Changelog
+## 4.30.0 (2024-03-28)
+
+Full Changelog: [v4.29.2...v4.30.0](https://github.com/openai/openai-node/compare/v4.29.2...v4.30.0)
+
+### Features
+
+* assistant fromReadableStream ([#738](https://github.com/openai/openai-node/issues/738)) ([8f4ba18](https://github.com/openai/openai-node/commit/8f4ba18268797d6c54c393d701b13c7ff2aa71bc))
+
+
+### Bug Fixes
+
+* **client:** correctly send deno version header ([#736](https://github.com/openai/openai-node/issues/736)) ([b7ea175](https://github.com/openai/openai-node/commit/b7ea175b2854909de77b920dd25613f1d2daefd6))
+* **example:** correcting example ([#739](https://github.com/openai/openai-node/issues/739)) ([a819551](https://github.com/openai/openai-node/commit/a81955175da24e196490a38850bbf6f9b6779ea8))
+* handle process.env being undefined in debug func ([#733](https://github.com/openai/openai-node/issues/733)) ([2baa149](https://github.com/openai/openai-node/commit/2baa1491f7834f779ca49c3027d2344ead412dd2))
+* **internal:** make toFile use input file's options ([#727](https://github.com/openai/openai-node/issues/727)) ([15880d7](https://github.com/openai/openai-node/commit/15880d77b6c1cf58a6b9cfdbf7ae4442cdbddbd6))
+
+
+### Chores
+
+* **internal:** add type ([#737](https://github.com/openai/openai-node/issues/737)) ([18c1989](https://github.com/openai/openai-node/commit/18c19891f783019517d7961fe03c4d98de0fcf93))
+
+
+### Documentation
+
+* **readme:** consistent use of sentence case in headings ([#729](https://github.com/openai/openai-node/issues/729)) ([7e515fd](https://github.com/openai/openai-node/commit/7e515fde433ebfb7871d75d53915eef05a08a916))
+* **readme:** document how to make undocumented requests ([#730](https://github.com/openai/openai-node/issues/730)) ([a06d861](https://github.com/openai/openai-node/commit/a06d861a015eeee411fa2c6ed9bf3000313cfc03))
+
## 4.29.2 (2024-03-19)
Full Changelog: [v4.29.1...v4.29.2](https://github.com/openai/openai-node/compare/v4.29.1...v4.29.2)
diff --git a/README.md b/README.md
index 6676cba0d..892c0ca1b 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.29.2/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.30.0/mod.ts';
```
diff --git a/build-deno b/build-deno
index 25569475f..6290acb0c 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.29.2/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.30.0/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 25994c236..57fa7aec6 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.29.2",
+ "version": "4.30.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index a9177ff54..2eb76a884 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.29.2'; // x-release-please-version
+export const VERSION = '4.30.0'; // x-release-please-version
From 7741b186fe7b04bf69594b1fb106e1deba3e52e0 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 29 Mar 2024 20:28:58 +0000
Subject: [PATCH 040/533] fix(streaming): trigger all event handlers with
fromReadableStream (#741)
---
src/lib/AssistantStream.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts
index c0a176db5..ece0ec65c 100644
--- a/src/lib/AssistantStream.ts
+++ b/src/lib/AssistantStream.ts
@@ -176,7 +176,7 @@ export class AssistantStream
this._connected();
const stream = Stream.fromReadableStream(readableStream, this.controller);
for await (const event of stream) {
- this.#handleEvent(event);
+ this.#addEvent(event);
}
if (stream.controller.signal?.aborted) {
throw new APIUserAbortError();
From 149d60e80ac5ab5b12a10a38f9b0d159dffd56ae Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 29 Mar 2024 21:08:19 +0000
Subject: [PATCH 041/533] feat(api): adding temperature parameter (#742)
---
.../beta/threads/messages/messages.ts | 16 ++++---
src/resources/beta/threads/runs/runs.ts | 19 ++++++++
src/resources/beta/threads/threads.ts | 44 +++++++++++++++----
.../beta/threads/runs/runs.test.ts | 1 +
.../beta/threads/threads.test.ts | 1 +
5 files changed, 66 insertions(+), 15 deletions(-)
diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts
index a2f2aaf1c..1c37eb2ff 100644
--- a/src/resources/beta/threads/messages/messages.ts
+++ b/src/resources/beta/threads/messages/messages.ts
@@ -353,9 +353,9 @@ export interface Message {
role: 'user' | 'assistant';
/**
- * If applicable, the ID of the
- * [run](https://platform.openai.com/docs/api-reference/runs) associated with the
- * authoring of this message.
+ * The ID of the [run](https://platform.openai.com/docs/api-reference/runs)
+ * associated with the creation of this message. Value is `null` when messages are
+ * created manually using the create message or create thread endpoints.
*/
run_id: string | null;
@@ -501,10 +501,14 @@ export interface MessageCreateParams {
content: string;
/**
- * The role of the entity that is creating the message. Currently only `user` is
- * supported.
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
*/
- role: 'user';
+ role: 'user' | 'assistant';
/**
* A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index a28dd9ae9..54c671131 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -331,6 +331,11 @@ export interface Run {
* in a terminal state (i.e. `in_progress`, `queued`, etc.).
*/
usage: Run.Usage | null;
+
+ /**
+ * The sampling temperature used for this run. If not set, defaults to 1.
+ */
+ temperature?: number | null;
}
export namespace Run {
@@ -461,6 +466,13 @@ export interface RunCreateParamsBase {
*/
stream?: boolean | null;
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
@@ -555,6 +567,13 @@ export interface RunCreateAndStreamParams {
*/
model?: string | null;
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index 266f6709e..9b4785850 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -164,10 +164,14 @@ export namespace ThreadCreateParams {
content: string;
/**
- * The role of the entity that is creating the message. Currently only `user` is
- * supported.
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
*/
- role: 'user';
+ role: 'user' | 'assistant';
/**
* A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
@@ -238,6 +242,13 @@ export interface ThreadCreateAndRunParamsBase {
*/
stream?: boolean | null;
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
/**
* If no thread is provided, an empty thread will be created.
*/
@@ -280,10 +291,14 @@ export namespace ThreadCreateAndRunParams {
content: string;
/**
- * The role of the entity that is creating the message. Currently only `user` is
- * supported.
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
*/
- role: 'user';
+ role: 'user' | 'assistant';
/**
* A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
@@ -355,6 +370,13 @@ export interface ThreadCreateAndRunStreamParams {
*/
model?: string | null;
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
/**
* If no thread is provided, an empty thread will be created.
*/
@@ -397,10 +419,14 @@ export namespace ThreadCreateAndRunStreamParams {
content: string;
/**
- * The role of the entity that is creating the message. Currently only `user` is
- * supported.
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
*/
- role: 'user';
+ role: 'user' | 'assistant';
/**
* A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
index 5e1b363fd..5f17c1b58 100644
--- a/tests/api-resources/beta/threads/runs/runs.test.ts
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -28,6 +28,7 @@ describe('resource runs', () => {
metadata: {},
model: 'string',
stream: false,
+ temperature: 1,
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
});
});
diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts
index 24cb815a7..3606019bd 100644
--- a/tests/api-resources/beta/threads/threads.test.ts
+++ b/tests/api-resources/beta/threads/threads.test.ts
@@ -109,6 +109,7 @@ describe('resource threads', () => {
metadata: {},
model: 'string',
stream: false,
+ temperature: 1,
thread: {
messages: [
{ role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
From abb0be7bcc6777e2efa8682c8a044842addb755a Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Sat, 30 Mar 2024 05:06:22 +0000
Subject: [PATCH 042/533] release: 4.31.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 13 +++++++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 1e5205f3f..485bcd4e9 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.30.0"
+ ".": "4.31.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bbc1785dc..6a28f8d3c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog
+## 4.31.0 (2024-03-30)
+
+Full Changelog: [v4.30.0...v4.31.0](https://github.com/openai/openai-node/compare/v4.30.0...v4.31.0)
+
+### Features
+
+* **api:** adding temperature parameter ([#742](https://github.com/openai/openai-node/issues/742)) ([b173b05](https://github.com/openai/openai-node/commit/b173b05eb52266d8f2c835ec4ed71cba8cdc609b))
+
+
+### Bug Fixes
+
+* **streaming:** trigger all event handlers with fromReadableStream ([#741](https://github.com/openai/openai-node/issues/741)) ([7b1e593](https://github.com/openai/openai-node/commit/7b1e5937d97b309ed51928b4388dcde74abda8dc))
+
## 4.30.0 (2024-03-28)
Full Changelog: [v4.29.2...v4.30.0](https://github.com/openai/openai-node/compare/v4.29.2...v4.30.0)
diff --git a/README.md b/README.md
index 892c0ca1b..787dd25ae 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.30.0/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.31.0/mod.ts';
```
diff --git a/build-deno b/build-deno
index 6290acb0c..66639030f 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.30.0/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.31.0/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 57fa7aec6..250e0939a 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.30.0",
+ "version": "4.31.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 2eb76a884..8eb5423f5 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.30.0'; // x-release-please-version
+export const VERSION = '4.31.0'; // x-release-please-version
From 60bc77f87e860e86b702c506cf9e1a725b81a697 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Sat, 30 Mar 2024 20:44:38 +0000
Subject: [PATCH 043/533] docs(readme): change undocumented params wording
(#744)
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 787dd25ae..6707707b2 100644
--- a/README.md
+++ b/README.md
@@ -454,7 +454,7 @@ await client.post('/some/path', {
});
```
-#### Undocumented params
+#### Undocumented request params
To make requests using undocumented parameters, you may use `// @ts-expect-error` on the undocumented
parameter. This library doesn't validate at runtime that the request matches the type, so any extra values you
@@ -475,7 +475,7 @@ extra param in the body.
If you want to explicitly send an extra argument, you can do so with the `query`, `body`, and `headers` request
options.
-#### Undocumented properties
+#### Undocumented response properties
To access undocumented response properties, you may access the response object with `// @ts-expect-error` on
the response object, or cast the response object to the requisite type. Like the request params, we do not
From 767bec025dd349c3a982a0aa62b134692d9a3ad2 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 1 Apr 2024 22:52:20 +0200
Subject: [PATCH 044/533] feat(api): add support for filtering messages by
run_id (#747)
---
src/resources/beta/threads/messages/messages.ts | 5 +++++
tests/api-resources/beta/threads/messages/messages.test.ts | 2 +-
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/src/resources/beta/threads/messages/messages.ts b/src/resources/beta/threads/messages/messages.ts
index 1c37eb2ff..28026f3ff 100644
--- a/src/resources/beta/threads/messages/messages.ts
+++ b/src/resources/beta/threads/messages/messages.ts
@@ -551,6 +551,11 @@ export interface MessageListParams extends CursorPageParams {
* order and `desc` for descending order.
*/
order?: 'asc' | 'desc';
+
+ /**
+ * Filter messages by the run ID that generated them.
+ */
+ run_id?: string;
}
export namespace Messages {
diff --git a/tests/api-resources/beta/threads/messages/messages.test.ts b/tests/api-resources/beta/threads/messages/messages.test.ts
index 3a80bfe1e..7f62944e0 100644
--- a/tests/api-resources/beta/threads/messages/messages.test.ts
+++ b/tests/api-resources/beta/threads/messages/messages.test.ts
@@ -81,7 +81,7 @@ describe('resource messages', () => {
await expect(
openai.beta.threads.messages.list(
'string',
- { after: 'string', before: 'string', limit: 0, order: 'asc' },
+ { after: 'string', before: 'string', limit: 0, order: 'asc', run_id: 'string' },
{ path: '/_stainless_unknown_path' },
),
).rejects.toThrow(OpenAI.NotFoundError);
From bc202fcdd9d3f54ff028b7f809b784f26fbf9b29 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 1 Apr 2024 23:13:12 +0200
Subject: [PATCH 045/533] chore(deps): remove unused dependency digest-fetch
(#748)
---
package.json | 1 -
tsconfig.build.json | 1 -
tsconfig.deno.json | 1 -
tsconfig.json | 1 -
typings/digest-fetch/index.d.ts | 33 -----------------------------
yarn.lock | 37 ---------------------------------
6 files changed, 74 deletions(-)
delete mode 100644 typings/digest-fetch/index.d.ts
diff --git a/package.json b/package.json
index 250e0939a..6fb9f1789 100644
--- a/package.json
+++ b/package.json
@@ -29,7 +29,6 @@
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
- "digest-fetch": "^1.3.0",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7",
diff --git a/tsconfig.build.json b/tsconfig.build.json
index 6adad0d06..45811cb8b 100644
--- a/tsconfig.build.json
+++ b/tsconfig.build.json
@@ -7,7 +7,6 @@
"paths": {
"openai/*": ["dist/src/*"],
"openai": ["dist/src/index.ts"],
- "digest-fetch": ["./typings/digest-fetch"]
},
"noEmit": false,
"declaration": true,
diff --git a/tsconfig.deno.json b/tsconfig.deno.json
index 5d6467665..d0e9473d9 100644
--- a/tsconfig.deno.json
+++ b/tsconfig.deno.json
@@ -9,7 +9,6 @@
"openai/_shims/auto/*": ["deno/_shims/auto/*-deno"],
"openai/*": ["deno/*"],
"openai": ["deno/index.ts"],
- "digest-fetch": ["./typings/digest-fetch"]
},
"noEmit": true,
"declaration": true,
diff --git a/tsconfig.json b/tsconfig.json
index 9908b2c80..5f99085fc 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -12,7 +12,6 @@
"openai/_shims/auto/*": ["src/_shims/auto/*-node"],
"openai/*": ["src/*"],
"openai": ["src/index.ts"],
- "digest-fetch": ["./typings/digest-fetch"]
},
"noEmit": true,
diff --git a/typings/digest-fetch/index.d.ts b/typings/digest-fetch/index.d.ts
deleted file mode 100644
index f6bcbfda9..000000000
--- a/typings/digest-fetch/index.d.ts
+++ /dev/null
@@ -1,33 +0,0 @@
-declare module 'digest-fetch';
-
-import type { RequestInfo, RequestInit, Response } from 'node-fetch';
-
-type Algorithm = 'MD5' | 'MD5-sess';
-
-type Options = {
- algorithm?: Algorithm;
- statusCode?: number;
- cnonceSize?: number;
- basic?: boolean;
- precomputeHash?: boolean;
- logger?: typeof console;
-};
-
-class DigestClient {
- user: string;
- password: string;
-
- private nonceRaw: string;
- private logger?: typeof console;
- private precomputedHash?: boolean;
- private statusCode?: number;
- private basic: boolean;
- private cnonceSize: number;
- private hasAuth: boolean;
- private digest: { nc: number; algorithm: Algorithm; realm: string };
-
- constructor(user: string, password: string, options: Options = {});
- async fetch(url: RequestInfo, options: RequestInit = {}): Promise;
-}
-
-export default DigestClient;
diff --git a/yarn.lock b/yarn.lock
index a79485a26..9cef21d9b 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1076,11 +1076,6 @@ balanced-match@^1.0.0:
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
-base-64@^0.1.0:
- version "0.1.0"
- resolved "https://registry.yarnpkg.com/base-64/-/base-64-0.1.0.tgz#780a99c84e7d600260361511c4877613bf24f6bb"
- integrity sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==
-
big-integer@^1.6.44:
version "1.6.52"
resolved "https://registry.yarnpkg.com/big-integer/-/big-integer-1.6.52.tgz#60a887f3047614a8e1bffe5d7173490a97dc8c85"
@@ -1193,11 +1188,6 @@ char-regex@^1.0.2:
resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf"
integrity sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==
-charenc@0.0.2:
- version "0.0.2"
- resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667"
- integrity sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==
-
ci-info@^3.2.0:
version "3.9.0"
resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4"
@@ -1305,11 +1295,6 @@ cross-spawn@^7.0.2, cross-spawn@^7.0.3:
shebang-command "^2.0.0"
which "^2.0.1"
-crypt@0.0.2:
- version "0.0.2"
- resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b"
- integrity sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==
-
debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4:
version "4.3.4"
resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865"
@@ -1380,14 +1365,6 @@ diff@^4.0.1:
resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d"
integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==
-digest-fetch@^1.3.0:
- version "1.3.0"
- resolved "https://registry.yarnpkg.com/digest-fetch/-/digest-fetch-1.3.0.tgz#898e69264d00012a23cf26e8a3e40320143fc661"
- integrity sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==
- dependencies:
- base-64 "^0.1.0"
- md5 "^2.3.0"
-
dir-glob@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f"
@@ -1934,11 +1911,6 @@ is-arrayish@^0.2.1:
resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==
-is-buffer@~1.1.6:
- version "1.1.6"
- resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be"
- integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==
-
is-core-module@^2.13.0:
version "2.13.1"
resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384"
@@ -2553,15 +2525,6 @@ makeerror@1.0.12:
dependencies:
tmpl "1.0.5"
-md5@^2.3.0:
- version "2.3.0"
- resolved "https://registry.yarnpkg.com/md5/-/md5-2.3.0.tgz#c3da9a6aae3a30b46b7b0c349b87b110dc3bda4f"
- integrity sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==
- dependencies:
- charenc "0.0.2"
- crypt "0.0.2"
- is-buffer "~1.1.6"
-
merge-stream@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60"
From 8031df3675c36cb654e37a63edbc7e5b02b05bac Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 2 Apr 2024 00:38:55 +0200
Subject: [PATCH 046/533] feat(api): run polling helpers (#749)
refactor: rename createAndStream to stream
---
README.md | 19 +-
api.md | 5 +
examples/assistant-stream-raw.ts | 0
examples/assistant-stream.ts | 2 +-
examples/assistants.ts | 22 +--
helpers.md | 4 +-
src/resources/beta/beta.ts | 1 +
src/resources/beta/index.ts | 1 +
src/resources/beta/threads/index.ts | 4 +
src/resources/beta/threads/runs/index.ts | 3 +
src/resources/beta/threads/runs/runs.ts | 224 ++++++++++++++++++++++-
src/resources/beta/threads/threads.ts | 124 +++++++++++++
12 files changed, 389 insertions(+), 20 deletions(-)
mode change 100644 => 100755 examples/assistant-stream-raw.ts
mode change 100644 => 100755 examples/assistant-stream.ts
mode change 100644 => 100755 examples/assistants.ts
diff --git a/README.md b/README.md
index 6707707b2..1ff9c757d 100644
--- a/README.md
+++ b/README.md
@@ -100,13 +100,30 @@ Documentation for each method, request param, and response field are available i
> [!IMPORTANT]
> Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217).
+### Polling Helpers
+
+When interacting with the API some actions such as starting a Run may take time to complete. The SDK includes
+helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
+If an API method results in an action which could benefit from polling there will be a corresponding version of the
+method ending in 'AndPoll'.
+
+For instance to create a Run and poll until it reaches a terminal state you can run:
+
+```ts
+const run = await openai.beta.threads.runs.createAndPoll(thread.id, {
+ assistant_id: assistantId,
+});
+```
+
+More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
+
### Streaming Helpers
The SDK also includes helpers to process streams and handle the incoming events.
```ts
const run = openai.beta.threads.runs
- .createAndStream(thread.id, {
+ .stream(thread.id, {
assistant_id: assistant.id,
})
.on('textCreated', (text) => process.stdout.write('\nassistant > '))
diff --git a/api.md b/api.md
index 504a103c7..2f82dd17b 100644
--- a/api.md
+++ b/api.md
@@ -224,6 +224,7 @@ Methods:
-
client.beta.threads.update(threadId, { ...params }) -> Thread
- client.beta.threads.del(threadId) -> ThreadDeleted
- client.beta.threads.createAndRun({ ...params }) -> Run
+- client.beta.threads.createAndRunPoll(body, options?) -> Promise<Threads.Run>
- client.beta.threads.createAndRunStream(body, options?) -> AssistantStream
### Runs
@@ -242,7 +243,11 @@ Methods:
- client.beta.threads.runs.list(threadId, { ...params }) -> RunsPage
- client.beta.threads.runs.cancel(threadId, runId) -> Run
- client.beta.threads.runs.submitToolOutputs(threadId, runId, { ...params }) -> Run
+- client.beta.threads.runs.createAndPoll(threadId, body, options?) -> Promise<Run>
- client.beta.threads.runs.createAndStream(threadId, body, options?) -> AssistantStream
+- client.beta.threads.runs.poll(threadId, runId, options?) -> Promise<Run>
+- client.beta.threads.runs.stream(threadId, body, options?) -> AssistantStream
+- client.beta.threads.runs.submitToolOutputsAndPoll(threadId, runId, body, options?) -> Promise<Run>
- client.beta.threads.runs.submitToolOutputsStream(threadId, runId, body, options?) -> AssistantStream
#### Steps
diff --git a/examples/assistant-stream-raw.ts b/examples/assistant-stream-raw.ts
old mode 100644
new mode 100755
diff --git a/examples/assistant-stream.ts b/examples/assistant-stream.ts
old mode 100644
new mode 100755
index 36c4ed152..6c71bf23b
--- a/examples/assistant-stream.ts
+++ b/examples/assistant-stream.ts
@@ -31,7 +31,7 @@ async function main() {
console.log('Created thread with Id: ' + threadId);
const run = openai.beta.threads.runs
- .createAndStream(threadId, {
+ .stream(threadId, {
assistant_id: assistantId,
})
//Subscribe to streaming events and log them
diff --git a/examples/assistants.ts b/examples/assistants.ts
old mode 100644
new mode 100755
index bbc2f80ce..40238ac86
--- a/examples/assistants.ts
+++ b/examples/assistants.ts
@@ -1,7 +1,6 @@
#!/usr/bin/env -S npm run tsn -T
import OpenAI from 'openai';
-import { sleep } from 'openai/core';
/**
* Example of polling for a complete response from an assistant
@@ -32,24 +31,17 @@ async function main() {
let threadId = thread.id;
console.log('Created thread with Id: ' + threadId);
- const run = await openai.beta.threads.runs.create(thread.id, {
+ const run = await openai.beta.threads.runs.createAndPoll(thread.id, {
assistant_id: assistantId,
additional_instructions: 'Please address the user as Jane Doe. The user has a premium account.',
});
- console.log('Created run with Id: ' + run.id);
-
- while (true) {
- const result = await openai.beta.threads.runs.retrieve(thread.id, run.id);
- if (result.status == 'completed') {
- const messages = await openai.beta.threads.messages.list(thread.id);
- for (const message of messages.getPaginatedItems()) {
- console.log(message);
- }
- break;
- } else {
- console.log('Waiting for completion. Current status: ' + result.status);
- await sleep(5000);
+ console.log('Run finished with status: ' + run.status);
+
+ if (run.status == 'completed') {
+ const messages = await openai.beta.threads.messages.list(thread.id);
+ for (const message of messages.getPaginatedItems()) {
+ console.log(message);
}
}
}
diff --git a/helpers.md b/helpers.md
index 9a94a618e..7a34c3023 100644
--- a/helpers.md
+++ b/helpers.md
@@ -13,7 +13,7 @@ More information can be found in the documentation: [Assistant Streaming](https:
```ts
const run = openai.beta.threads.runs
- .createAndStream(thread.id, {
+ .stream(thread.id, {
assistant_id: assistant.id,
})
.on('textCreated', (text) => process.stdout.write('\nassistant > '))
@@ -41,7 +41,7 @@ const run = openai.beta.threads.runs
There are three helper methods for creating streams:
```ts
-openai.beta.threads.runs.createAndStream();
+openai.beta.threads.runs.stream();
```
This method can be used to start and stream the response to an existing run with an associated thread
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index 43ee8c7e7..7d4457319 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -37,5 +37,6 @@ export namespace Beta {
export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams;
export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
+ export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams;
export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams;
}
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
index 7f35730fb..e43ff7315 100644
--- a/src/resources/beta/index.ts
+++ b/src/resources/beta/index.ts
@@ -28,6 +28,7 @@ export {
ThreadCreateAndRunParams,
ThreadCreateAndRunParamsNonStreaming,
ThreadCreateAndRunParamsStreaming,
+ ThreadCreateAndRunPollParams,
ThreadCreateAndRunStreamParams,
Threads,
} from './threads/index';
diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts
index 097a52819..ac2f9a4fa 100644
--- a/src/resources/beta/threads/index.ts
+++ b/src/resources/beta/threads/index.ts
@@ -36,10 +36,13 @@ export {
RunCreateParamsStreaming,
RunUpdateParams,
RunListParams,
+ RunCreateAndPollParams,
RunCreateAndStreamParams,
+ RunStreamParams,
RunSubmitToolOutputsParams,
RunSubmitToolOutputsParamsNonStreaming,
RunSubmitToolOutputsParamsStreaming,
+ RunSubmitToolOutputsAndPollParams,
RunSubmitToolOutputsStreamParams,
RunsPage,
Runs,
@@ -52,6 +55,7 @@ export {
ThreadCreateAndRunParams,
ThreadCreateAndRunParamsNonStreaming,
ThreadCreateAndRunParamsStreaming,
+ ThreadCreateAndRunPollParams,
ThreadCreateAndRunStreamParams,
Threads,
} from './threads';
diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts
index 636b5d850..c9b2d1ef5 100644
--- a/src/resources/beta/threads/runs/index.ts
+++ b/src/resources/beta/threads/runs/index.ts
@@ -31,10 +31,13 @@ export {
RunCreateParamsStreaming,
RunUpdateParams,
RunListParams,
+ RunCreateAndPollParams,
RunCreateAndStreamParams,
+ RunStreamParams,
RunSubmitToolOutputsParams,
RunSubmitToolOutputsParamsNonStreaming,
RunSubmitToolOutputsParamsStreaming,
+ RunSubmitToolOutputsAndPollParams,
RunSubmitToolOutputsStreamParams,
RunsPage,
Runs,
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 54c671131..5dfc7d595 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -5,6 +5,7 @@ import { APIPromise } from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
import { AssistantStream, RunCreateParamsBaseStream } from 'openai/lib/AssistantStream';
+import { sleep } from 'openai/core';
import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
@@ -102,8 +103,24 @@ export class Runs extends APIResource {
});
}
+ /**
+ * A helper to create a run an poll for a terminal state. More information on Run
+ * lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ */
+ async createAndPoll(
+ threadId: string,
+ body: RunCreateParamsNonStreaming,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const run = await this.create(threadId, body, options);
+ return await this.poll(threadId, run.id, options);
+ }
+
/**
* Create a Run stream
+ *
+ * @deprecated use `stream` instead
*/
createAndStream(
threadId: string,
@@ -113,6 +130,66 @@ export class Runs extends APIResource {
return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
}
+ /**
+ * A helper to poll a run status until it reaches a terminal state. More
+ * information on Run lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ */
+ async poll(
+ threadId: string,
+ runId: string,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' };
+
+ if (options?.pollIntervalMs) {
+ headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();
+ }
+
+ while (true) {
+ const { data: run, response } = await this.retrieve(threadId, runId, {
+ ...options,
+ headers: { ...options?.headers, ...headers },
+ }).withResponse();
+
+ switch (run.status) {
+ //If we are in any sort of intermediate state we poll
+ case 'queued':
+ case 'in_progress':
+ case 'cancelling':
+ let sleepInterval = 5000;
+
+ if (options?.pollIntervalMs) {
+ sleepInterval = options.pollIntervalMs;
+ } else {
+ const headerInterval = response.headers.get('openai-poll-after-ms');
+ if (headerInterval) {
+ const headerIntervalMs = parseInt(headerInterval);
+ if (!isNaN(headerIntervalMs)) {
+ sleepInterval = headerIntervalMs;
+ }
+ }
+ }
+ await sleep(sleepInterval);
+ break;
+ //We return the run in any terminal state.
+ case 'requires_action':
+ case 'cancelled':
+ case 'completed':
+ case 'failed':
+ case 'expired':
+ return run;
+ }
+ }
+ }
+
+ /**
+ * Create a Run stream
+ */
+ stream(threadId: string, body: RunCreateParamsBaseStream, options?: Core.RequestOptions): AssistantStream {
+ return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
+ }
+
/**
* When a run has the `status: "requires_action"` and `required_action.type` is
* `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
@@ -151,9 +228,25 @@ export class Runs extends APIResource {
}) as APIPromise | APIPromise>;
}
+ /**
+ * A helper to submit a tool output to a run and poll for a terminal run state.
+ * More information on Run lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ */
+ async submitToolOutputsAndPoll(
+ threadId: string,
+ runId: string,
+ body: RunSubmitToolOutputsParamsNonStreaming,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const run = await this.submitToolOutputs(threadId, runId, body, options);
+ return await this.poll(threadId, run.id, options);
+ }
+
/**
* Submit the tool outputs from a previous run and stream the run to a terminal
- * state.
+ * state. More information on Run lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
*/
submitToolOutputsStream(
threadId: string,
@@ -529,6 +622,58 @@ export interface RunListParams extends CursorPageParams {
order?: 'asc' | 'desc';
}
+export interface RunCreateAndPollParams {
+ /**
+ * The ID of the
+ * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this run.
+ */
+ assistant_id: string;
+
+ /**
+ * Appends additional instructions at the end of the instructions for the run. This
+ * is useful for modifying the behavior on a per-run basis without overriding other
+ * instructions.
+ */
+ additional_instructions?: string | null;
+
+ /**
+ * Overrides the
+ * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+ * of the assistant. This is useful for modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this run. If a value is provided here, it will override the
+ * model associated with the assistant. If not, the model associated with the
+ * assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array | null;
+}
+
export interface RunCreateAndStreamParams {
/**
* The ID of the
@@ -581,6 +726,58 @@ export interface RunCreateAndStreamParams {
tools?: Array | null;
}
+export interface RunStreamParams {
+ /**
+ * The ID of the
+ * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this run.
+ */
+ assistant_id: string;
+
+ /**
+ * Appends additional instructions at the end of the instructions for the run. This
+ * is useful for modifying the behavior on a per-run basis without overriding other
+ * instructions.
+ */
+ additional_instructions?: string | null;
+
+ /**
+ * Overrides the
+ * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+ * of the assistant. This is useful for modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this run. If a value is provided here, it will override the
+ * model associated with the assistant. If not, the model associated with the
+ * assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array | null;
+}
+
export type RunSubmitToolOutputsParams =
| RunSubmitToolOutputsParamsNonStreaming
| RunSubmitToolOutputsParamsStreaming;
@@ -635,6 +832,28 @@ export interface RunSubmitToolOutputsParamsStreaming extends RunSubmitToolOutput
stream: true;
}
+export interface RunSubmitToolOutputsAndPollParams {
+ /**
+ * A list of tools for which the outputs are being submitted.
+ */
+ tool_outputs: Array;
+}
+
+export namespace RunSubmitToolOutputsAndPollParams {
+ export interface ToolOutput {
+ /**
+ * The output of the tool call to be submitted to continue the run.
+ */
+ output?: string;
+
+ /**
+ * The ID of the tool call in the `required_action` object within the run object
+ * the output is being submitted for.
+ */
+ tool_call_id?: string;
+ }
+}
+
export interface RunSubmitToolOutputsStreamParams {
/**
* A list of tools for which the outputs are being submitted.
@@ -667,10 +886,13 @@ export namespace Runs {
export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
export import RunUpdateParams = RunsAPI.RunUpdateParams;
export import RunListParams = RunsAPI.RunListParams;
+ export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams;
export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams;
+ export import RunStreamParams = RunsAPI.RunStreamParams;
export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
+ export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams;
export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams;
export import Steps = StepsAPI.Steps;
export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs;
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index 9b4785850..1b4b3f7d5 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -92,6 +92,19 @@ export class Threads extends APIResource {
}) as APIPromise | APIPromise>;
}
+ /**
+ * A helper to create a thread, start a run and then poll for a terminal state.
+ * More information on Run lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ */
+ async createAndRunPoll(
+ body: ThreadCreateAndRunParamsNonStreaming,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise {
+ const run = await this.createAndRun(body, options);
+ return await this.runs.poll(run.thread_id, run.id, options);
+ }
+
/**
* Create a thread and stream the run back
*/
@@ -340,6 +353,113 @@ export interface ThreadCreateAndRunParamsStreaming extends ThreadCreateAndRunPar
stream: true;
}
+export interface ThreadCreateAndRunPollParams {
+ /**
+ * The ID of the
+ * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this run.
+ */
+ assistant_id: string;
+
+ /**
+ * Override the default system message of the assistant. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this run. If a value is provided here, it will override the
+ * model associated with the assistant. If not, the model associated with the
+ * assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * If no thread is provided, an empty thread will be created.
+ */
+ thread?: ThreadCreateAndRunPollParams.Thread;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array<
+ AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
+ > | null;
+}
+
+export namespace ThreadCreateAndRunPollParams {
+ /**
+ * If no thread is provided, an empty thread will be created.
+ */
+ export interface Thread {
+ /**
+ * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+ * start the thread with.
+ */
+ messages?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+
+ export namespace Thread {
+ export interface Message {
+ /**
+ * The content of the message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
+ */
+ role: 'user' | 'assistant';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the message should use. There can be a maximum of 10 files attached to a
+ * message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+ }
+}
+
export interface ThreadCreateAndRunStreamParams {
/**
* The ID of the
@@ -455,6 +575,7 @@ export namespace Threads {
export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams;
export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
+ export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams;
export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams;
export import Runs = RunsAPI.Runs;
export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
@@ -466,10 +587,13 @@ export namespace Threads {
export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
export import RunUpdateParams = RunsAPI.RunUpdateParams;
export import RunListParams = RunsAPI.RunListParams;
+ export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams;
export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams;
+ export import RunStreamParams = RunsAPI.RunStreamParams;
export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
+ export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams;
export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams;
export import Messages = MessagesAPI.Messages;
export import Annotation = MessagesAPI.Annotation;
From 445b795c4ef4c109e69d1e3d74b179f238e5782c Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 2 Apr 2024 00:39:14 +0200
Subject: [PATCH 047/533] release: 4.32.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 24 ++++++++++++++++++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 485bcd4e9..a2b09ee37 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.31.0"
+ ".": "4.32.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6a28f8d3c..3be8b4c02 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
# Changelog
+## 4.32.0 (2024-04-01)
+
+Full Changelog: [v4.31.0...v4.32.0](https://github.com/openai/openai-node/compare/v4.31.0...v4.32.0)
+
+### Features
+
+* **api:** add support for filtering messages by run_id ([#747](https://github.com/openai/openai-node/issues/747)) ([9a397ac](https://github.com/openai/openai-node/commit/9a397acffa9f10c3f48e86e3bdb3851770f87b42))
+* **api:** run polling helpers ([#749](https://github.com/openai/openai-node/issues/749)) ([02920ae](https://github.com/openai/openai-node/commit/02920ae082480fc7a7ffe9fa583d053a40dc7120))
+
+
+### Chores
+
+* **deps:** remove unused dependency digest-fetch ([#748](https://github.com/openai/openai-node/issues/748)) ([5376837](https://github.com/openai/openai-node/commit/537683734d39dd956a7dcef4339c1167ce6fe13c))
+
+
+### Documentation
+
+* **readme:** change undocumented params wording ([#744](https://github.com/openai/openai-node/issues/744)) ([8796691](https://github.com/openai/openai-node/commit/87966911045275db86844dfdcde59653edaef264))
+
+
+### Refactors
+
+* rename createAndStream to stream ([02920ae](https://github.com/openai/openai-node/commit/02920ae082480fc7a7ffe9fa583d053a40dc7120))
+
## 4.31.0 (2024-03-30)
Full Changelog: [v4.30.0...v4.31.0](https://github.com/openai/openai-node/compare/v4.30.0...v4.31.0)
diff --git a/README.md b/README.md
index 1ff9c757d..2adc81afc 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.31.0/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.32.0/mod.ts';
```
diff --git a/build-deno b/build-deno
index 66639030f..19eefa7c3 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.31.0/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.32.0/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 6fb9f1789..11fa0c5e2 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.31.0",
+ "version": "4.32.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 8eb5423f5..7e04c79b5 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.31.0'; // x-release-please-version
+export const VERSION = '4.32.0'; // x-release-please-version
From 5b41d1077f219b8feb7557cfab98caf7b5de560d Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 2 Apr 2024 15:02:43 +0200
Subject: [PATCH 048/533] chore(deps): bump yarn to v1.22.22 (#751)
---
package.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/package.json b/package.json
index 11fa0c5e2..e10df6850 100644
--- a/package.json
+++ b/package.json
@@ -8,7 +8,7 @@
"type": "commonjs",
"repository": "github:openai/openai-node",
"license": "Apache-2.0",
- "packageManager": "yarn@1.22.21",
+ "packageManager": "yarn@1.22.22",
"files": [
"*"
],
From b3269eb0cbeb17415de0863f5cb28c4a9f8b643f Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 2 Apr 2024 15:03:03 +0200
Subject: [PATCH 049/533] release: 4.32.1
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 8 ++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index a2b09ee37..27308d159 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.32.0"
+ ".": "4.32.1"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3be8b4c02..a1702ad3b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 4.32.1 (2024-04-02)
+
+Full Changelog: [v4.32.0...v4.32.1](https://github.com/openai/openai-node/compare/v4.32.0...v4.32.1)
+
+### Chores
+
+* **deps:** bump yarn to v1.22.22 ([#751](https://github.com/openai/openai-node/issues/751)) ([5b41d10](https://github.com/openai/openai-node/commit/5b41d1077f219b8feb7557cfab98caf7b5de560d))
+
## 4.32.0 (2024-04-01)
Full Changelog: [v4.31.0...v4.32.0](https://github.com/openai/openai-node/compare/v4.31.0...v4.32.0)
diff --git a/README.md b/README.md
index 2adc81afc..aae0367b6 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.32.0/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.32.1/mod.ts';
```
diff --git a/build-deno b/build-deno
index 19eefa7c3..a56b6af13 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.32.0/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.32.1/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index e10df6850..4d87ed952 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.32.0",
+ "version": "4.32.1",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 7e04c79b5..c2e5453c3 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.32.0'; // x-release-please-version
+export const VERSION = '4.32.1'; // x-release-please-version
From 2bd3294ed564492def05a61906e02ae5f2aba6c4 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 2 Apr 2024 19:33:02 +0200
Subject: [PATCH 050/533] chore(tests): bump ecosystem tests dependencies
(#753)
---
.../cloudflare-worker/package-lock.json | 14 ++--
.../node-ts-cjs-auto/package-lock.json | 20 ++---
.../node-ts-cjs-web/package-lock.json | 48 ++++--------
ecosystem-tests/node-ts-cjs/package-lock.json | 48 ++++--------
.../node-ts-esm-auto/package-lock.json | 26 +++----
.../node-ts-esm-web/package-lock.json | 26 +++----
ecosystem-tests/node-ts-esm/package-lock.json | 26 +++----
.../node-ts4.5-jest27/package-lock.json | 16 ++--
.../node-ts4.5-jest27/package.json | 4 +-
.../ts-browser-webpack/package-lock.json | 20 ++---
ecosystem-tests/vercel-edge/package-lock.json | 78 ++++++++++---------
ecosystem-tests/vercel-edge/package.json | 4 +-
12 files changed, 152 insertions(+), 178 deletions(-)
diff --git a/ecosystem-tests/cloudflare-worker/package-lock.json b/ecosystem-tests/cloudflare-worker/package-lock.json
index 7e86792db..dd42f0b36 100644
--- a/ecosystem-tests/cloudflare-worker/package-lock.json
+++ b/ecosystem-tests/cloudflare-worker/package-lock.json
@@ -5206,9 +5206,9 @@
}
},
"node_modules/ts-jest": {
- "version": "29.1.1",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz",
- "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==",
+ "version": "29.1.2",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz",
+ "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
@@ -5224,7 +5224,7 @@
"ts-jest": "cli.js"
},
"engines": {
- "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ "node": "^16.10.0 || ^18.0.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": ">=7.0.0-beta.0 <8",
@@ -5261,9 +5261,9 @@
}
},
"node_modules/ts-jest/node_modules/semver": {
- "version": "7.5.4",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "version": "7.6.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
+ "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
diff --git a/ecosystem-tests/node-ts-cjs-auto/package-lock.json b/ecosystem-tests/node-ts-cjs-auto/package-lock.json
index a11f9814d..c3880beb2 100644
--- a/ecosystem-tests/node-ts-cjs-auto/package-lock.json
+++ b/ecosystem-tests/node-ts-cjs-auto/package-lock.json
@@ -1093,22 +1093,22 @@
}
},
"node_modules/@types/node": {
- "version": "20.11.20",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
- "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "version": "20.11.30",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
+ "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@types/node-fetch": {
- "version": "2.6.4",
- "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.4.tgz",
- "integrity": "sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==",
+ "version": "2.6.11",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz",
+ "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==",
"dev": true,
"dependencies": {
"@types/node": "*",
- "form-data": "^3.0.0"
+ "form-data": "^4.0.0"
}
},
"node_modules/@types/prettier": {
@@ -1783,9 +1783,9 @@
}
},
"node_modules/form-data": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz",
- "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==",
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
+ "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
"dev": true,
"dependencies": {
"asynckit": "^0.4.0",
diff --git a/ecosystem-tests/node-ts-cjs-web/package-lock.json b/ecosystem-tests/node-ts-cjs-web/package-lock.json
index cd721ae53..ff6fb3bac 100644
--- a/ecosystem-tests/node-ts-cjs-web/package-lock.json
+++ b/ecosystem-tests/node-ts-cjs-web/package-lock.json
@@ -1143,13 +1143,13 @@
"dev": true
},
"node_modules/@types/node-fetch": {
- "version": "2.6.4",
- "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.4.tgz",
- "integrity": "sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==",
+ "version": "2.6.11",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz",
+ "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==",
"dev": true,
"dependencies": {
"@types/node": "*",
- "form-data": "^3.0.0"
+ "form-data": "^4.0.0"
}
},
"node_modules/@types/stack-utils": {
@@ -2082,9 +2082,9 @@
}
},
"node_modules/form-data": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz",
- "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==",
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
+ "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
"dev": true,
"dependencies": {
"asynckit": "^0.4.0",
@@ -3208,20 +3208,6 @@
}
}
},
- "node_modules/jsdom/node_modules/form-data": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
- "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
- "dev": true,
- "dependencies": {
- "asynckit": "^0.4.0",
- "combined-stream": "^1.0.8",
- "mime-types": "^2.1.12"
- },
- "engines": {
- "node": ">= 6"
- }
- },
"node_modules/jsdom/node_modules/tr46": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz",
@@ -4150,9 +4136,9 @@
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"node_modules/ts-jest": {
- "version": "29.1.1",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz",
- "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==",
+ "version": "29.1.2",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz",
+ "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
@@ -4168,7 +4154,7 @@
"ts-jest": "cli.js"
},
"engines": {
- "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ "node": "^16.10.0 || ^18.0.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": ">=7.0.0-beta.0 <8",
@@ -4205,9 +4191,9 @@
}
},
"node_modules/ts-jest/node_modules/semver": {
- "version": "7.5.4",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "version": "7.6.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
+ "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
@@ -4391,9 +4377,9 @@
}
},
"node_modules/whatwg-fetch": {
- "version": "3.6.19",
- "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.19.tgz",
- "integrity": "sha512-d67JP4dHSbm2TrpFj8AbO8DnL1JXL5J9u0Kq2xW6d0TFDbCA3Muhdt8orXC22utleTVj7Prqt82baN6RBvnEgw==",
+ "version": "3.6.20",
+ "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz",
+ "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==",
"dev": true
},
"node_modules/whatwg-mimetype": {
diff --git a/ecosystem-tests/node-ts-cjs/package-lock.json b/ecosystem-tests/node-ts-cjs/package-lock.json
index c5280c5b5..c9493b515 100644
--- a/ecosystem-tests/node-ts-cjs/package-lock.json
+++ b/ecosystem-tests/node-ts-cjs/package-lock.json
@@ -1135,22 +1135,22 @@
}
},
"node_modules/@types/node": {
- "version": "20.11.20",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
- "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "version": "20.11.30",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
+ "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@types/node-fetch": {
- "version": "2.6.4",
- "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.4.tgz",
- "integrity": "sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==",
+ "version": "2.6.11",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz",
+ "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==",
"dev": true,
"dependencies": {
"@types/node": "*",
- "form-data": "^3.0.0"
+ "form-data": "^4.0.0"
}
},
"node_modules/@types/stack-utils": {
@@ -2069,9 +2069,9 @@
}
},
"node_modules/form-data": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-3.0.1.tgz",
- "integrity": "sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==",
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
+ "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
"dev": true,
"dependencies": {
"asynckit": "^0.4.0",
@@ -3175,20 +3175,6 @@
}
}
},
- "node_modules/jsdom/node_modules/form-data": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
- "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
- "dev": true,
- "dependencies": {
- "asynckit": "^0.4.0",
- "combined-stream": "^1.0.8",
- "mime-types": "^2.1.12"
- },
- "engines": {
- "node": ">= 6"
- }
- },
"node_modules/jsdom/node_modules/tr46": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz",
@@ -4117,9 +4103,9 @@
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"node_modules/ts-jest": {
- "version": "29.1.1",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz",
- "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==",
+ "version": "29.1.2",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz",
+ "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
@@ -4135,7 +4121,7 @@
"ts-jest": "cli.js"
},
"engines": {
- "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ "node": "^16.10.0 || ^18.0.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": ">=7.0.0-beta.0 <8",
@@ -4172,9 +4158,9 @@
}
},
"node_modules/ts-jest/node_modules/semver": {
- "version": "7.5.4",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "version": "7.6.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
+ "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
diff --git a/ecosystem-tests/node-ts-esm-auto/package-lock.json b/ecosystem-tests/node-ts-esm-auto/package-lock.json
index 4bce04f80..3e4438d05 100644
--- a/ecosystem-tests/node-ts-esm-auto/package-lock.json
+++ b/ecosystem-tests/node-ts-esm-auto/package-lock.json
@@ -1157,9 +1157,9 @@
}
},
"node_modules/@types/node": {
- "version": "20.11.20",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
- "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "version": "20.11.30",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
+ "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
@@ -3663,9 +3663,9 @@
}
},
"node_modules/ts-jest": {
- "version": "29.1.1",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz",
- "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==",
+ "version": "29.1.2",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz",
+ "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
@@ -3681,7 +3681,7 @@
"ts-jest": "cli.js"
},
"engines": {
- "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ "node": "^16.10.0 || ^18.0.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": ">=7.0.0-beta.0 <8",
@@ -3718,9 +3718,9 @@
}
},
"node_modules/ts-jest/node_modules/semver": {
- "version": "7.5.4",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "version": "7.6.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
+ "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
@@ -3739,9 +3739,9 @@
"dev": true
},
"node_modules/ts-node": {
- "version": "10.9.1",
- "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz",
- "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==",
+ "version": "10.9.2",
+ "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz",
+ "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==",
"dev": true,
"dependencies": {
"@cspotcode/source-map-support": "^0.8.0",
diff --git a/ecosystem-tests/node-ts-esm-web/package-lock.json b/ecosystem-tests/node-ts-esm-web/package-lock.json
index b96128a4e..118bf0909 100644
--- a/ecosystem-tests/node-ts-esm-web/package-lock.json
+++ b/ecosystem-tests/node-ts-esm-web/package-lock.json
@@ -1157,9 +1157,9 @@
}
},
"node_modules/@types/node": {
- "version": "20.11.20",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
- "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "version": "20.11.30",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
+ "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
@@ -3663,9 +3663,9 @@
}
},
"node_modules/ts-jest": {
- "version": "29.1.1",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz",
- "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==",
+ "version": "29.1.2",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz",
+ "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
@@ -3681,7 +3681,7 @@
"ts-jest": "cli.js"
},
"engines": {
- "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ "node": "^16.10.0 || ^18.0.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": ">=7.0.0-beta.0 <8",
@@ -3718,9 +3718,9 @@
}
},
"node_modules/ts-jest/node_modules/semver": {
- "version": "7.5.4",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "version": "7.6.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
+ "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
@@ -3739,9 +3739,9 @@
"dev": true
},
"node_modules/ts-node": {
- "version": "10.9.1",
- "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz",
- "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==",
+ "version": "10.9.2",
+ "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz",
+ "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==",
"dev": true,
"dependencies": {
"@cspotcode/source-map-support": "^0.8.0",
diff --git a/ecosystem-tests/node-ts-esm/package-lock.json b/ecosystem-tests/node-ts-esm/package-lock.json
index 4aecff6ca..cb5b8eaa8 100644
--- a/ecosystem-tests/node-ts-esm/package-lock.json
+++ b/ecosystem-tests/node-ts-esm/package-lock.json
@@ -1157,9 +1157,9 @@
}
},
"node_modules/@types/node": {
- "version": "20.11.20",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz",
- "integrity": "sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==",
+ "version": "20.11.30",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
+ "integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
@@ -3663,9 +3663,9 @@
}
},
"node_modules/ts-jest": {
- "version": "29.1.1",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz",
- "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==",
+ "version": "29.1.2",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz",
+ "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
@@ -3681,7 +3681,7 @@
"ts-jest": "cli.js"
},
"engines": {
- "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ "node": "^16.10.0 || ^18.0.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": ">=7.0.0-beta.0 <8",
@@ -3718,9 +3718,9 @@
}
},
"node_modules/ts-jest/node_modules/semver": {
- "version": "7.5.4",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "version": "7.6.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
+ "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
@@ -3739,9 +3739,9 @@
"dev": true
},
"node_modules/ts-node": {
- "version": "10.9.1",
- "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz",
- "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==",
+ "version": "10.9.2",
+ "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz",
+ "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==",
"dev": true,
"dependencies": {
"@cspotcode/source-map-support": "^0.8.0",
diff --git a/ecosystem-tests/node-ts4.5-jest27/package-lock.json b/ecosystem-tests/node-ts4.5-jest27/package-lock.json
index 76813597f..bedd114f8 100644
--- a/ecosystem-tests/node-ts4.5-jest27/package-lock.json
+++ b/ecosystem-tests/node-ts4.5-jest27/package-lock.json
@@ -14,13 +14,13 @@
},
"devDependencies": {
"@types/jest": "27.5.2",
- "@types/node": "^20.4.2",
+ "@types/node": "20.11.20",
"@types/node-fetch": "^2.6.1",
"@types/ws": "^8.5.4",
"fastest-levenshtein": "^1.0.16",
"jest": "27.5.1",
"ts-jest": "27.1.5",
- "typescript": "4.5.4"
+ "typescript": "4.5.5"
}
},
"node_modules/@ampproject/remapping": {
@@ -1077,9 +1077,9 @@
}
},
"node_modules/@types/node-fetch": {
- "version": "2.6.5",
- "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.5.tgz",
- "integrity": "sha512-OZsUlr2nxvkqUFLSaY2ZbA+P1q22q+KrlxWOn/38RX+u5kTkYL2mTujEpzUhGkS+K/QCYp9oagfXG39XOzyySg==",
+ "version": "2.6.11",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz",
+ "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==",
"dev": true,
"dependencies": {
"@types/node": "*",
@@ -4108,9 +4108,9 @@
}
},
"node_modules/typescript": {
- "version": "4.5.4",
- "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.4.tgz",
- "integrity": "sha512-VgYs2A2QIRuGphtzFV7aQJduJ2gyfTljngLzjpfW9FoYZF6xuw1W0vW9ghCKLfcWrCFxK81CSGRAvS1pn4fIUg==",
+ "version": "4.5.5",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.5.tgz",
+ "integrity": "sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA==",
"dev": true,
"bin": {
"tsc": "bin/tsc",
diff --git a/ecosystem-tests/node-ts4.5-jest27/package.json b/ecosystem-tests/node-ts4.5-jest27/package.json
index 1740acae8..ae76bcc9c 100644
--- a/ecosystem-tests/node-ts4.5-jest27/package.json
+++ b/ecosystem-tests/node-ts4.5-jest27/package.json
@@ -13,13 +13,13 @@
"tsconfig-paths": "^4.0.0"
},
"devDependencies": {
- "@types/node": "^20.4.2",
+ "@types/node": "20.11.20",
"@types/node-fetch": "^2.6.1",
"@types/jest": "27.5.2",
"@types/ws": "^8.5.4",
"fastest-levenshtein": "^1.0.16",
"jest": "27.5.1",
"ts-jest": "27.1.5",
- "typescript": "4.5.4"
+ "typescript": "4.5.5"
}
}
diff --git a/ecosystem-tests/ts-browser-webpack/package-lock.json b/ecosystem-tests/ts-browser-webpack/package-lock.json
index b8f507e9b..686d0c2f9 100644
--- a/ecosystem-tests/ts-browser-webpack/package-lock.json
+++ b/ecosystem-tests/ts-browser-webpack/package-lock.json
@@ -6604,9 +6604,9 @@
"dev": true
},
"node_modules/ts-node": {
- "version": "10.9.1",
- "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz",
- "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==",
+ "version": "10.9.2",
+ "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz",
+ "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==",
"dev": true,
"dependencies": {
"@cspotcode/source-map-support": "^0.8.0",
@@ -6978,9 +6978,9 @@
}
},
"node_modules/webpack-dev-middleware": {
- "version": "5.3.3",
- "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz",
- "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==",
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz",
+ "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==",
"dev": true,
"dependencies": {
"colorette": "^2.0.10",
@@ -7001,9 +7001,9 @@
}
},
"node_modules/webpack-dev-server": {
- "version": "4.15.1",
- "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.1.tgz",
- "integrity": "sha512-5hbAst3h3C3L8w6W4P96L5vaV0PxSmJhxZvWKYIdgxOQm8pNZ5dEOmmSLBVpP85ReeyRt6AS1QJNyo/oFFPeVA==",
+ "version": "4.15.2",
+ "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz",
+ "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==",
"dev": true,
"dependencies": {
"@types/bonjour": "^3.5.9",
@@ -7034,7 +7034,7 @@
"serve-index": "^1.9.1",
"sockjs": "^0.3.24",
"spdy": "^4.0.2",
- "webpack-dev-middleware": "^5.3.1",
+ "webpack-dev-middleware": "^5.3.4",
"ws": "^8.13.0"
},
"bin": {
diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json
index ebac7eb81..fdfe2952d 100644
--- a/ecosystem-tests/vercel-edge/package-lock.json
+++ b/ecosystem-tests/vercel-edge/package-lock.json
@@ -15,8 +15,8 @@
},
"devDependencies": {
"@types/node": "20.3.3",
- "@types/react": "18.2.58",
- "@types/react-dom": "18.2.19",
+ "@types/react": "18.2.74",
+ "@types/react-dom": "18.2.23",
"edge-runtime": "^2.4.3",
"fastest-levenshtein": "^1.0.16",
"jest": "^29.5.0",
@@ -730,9 +730,9 @@
}
},
"node_modules/@edge-runtime/format": {
- "version": "2.2.0",
- "resolved": "https://registry.npmjs.org/@edge-runtime/format/-/format-2.2.0.tgz",
- "integrity": "sha512-gPrS6AVw/qJJL0vcxMXv4kFXCU3ZTCD1uuJpwX15YxHV8BgU9OG5v9LrkkXcr96PBT/9epypfNJMhlWADuEziw==",
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@edge-runtime/format/-/format-2.2.1.tgz",
+ "integrity": "sha512-JQTRVuiusQLNNLe2W9tnzBlV/GvSVcozLl4XZHk5swnRZ/v6jp8TqR8P7sqmJsQqblDZ3EztcWmLDbhRje/+8g==",
"dev": true,
"engines": {
"node": ">=16"
@@ -747,22 +747,31 @@
"node": ">=14"
}
},
+ "node_modules/@edge-runtime/ponyfill": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/@edge-runtime/ponyfill/-/ponyfill-2.4.2.tgz",
+ "integrity": "sha512-oN17GjFr69chu6sDLvXxdhg0Qe8EZviGSuqzR9qOiKh4MhFYGdBBcqRNzdmYeAdeRzOW2mM9yil4RftUQ7sUOA==",
+ "dev": true,
+ "engines": {
+ "node": ">=16"
+ }
+ },
"node_modules/@edge-runtime/primitives": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/@edge-runtime/primitives/-/primitives-3.1.0.tgz",
- "integrity": "sha512-yxr1QM/lC8nrU38zxePeDqVeIjwsJ83gKGTH8YJ4CoHTv3q+6xEeqRIT+/9IPX/FApWYtnxHauhNqr6CHRj5YA==",
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/@edge-runtime/primitives/-/primitives-4.1.0.tgz",
+ "integrity": "sha512-Vw0lbJ2lvRUqc7/soqygUX216Xb8T3WBZ987oywz6aJqRxcwSVWwr9e+Nqo2m9bxobA9mdbWNNoRY6S9eko1EQ==",
"dev": true,
"engines": {
"node": ">=16"
}
},
"node_modules/@edge-runtime/vm": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/@edge-runtime/vm/-/vm-3.1.0.tgz",
- "integrity": "sha512-Y2JZgJP+4byI17SiDeEZhvBUvJ+om7E5ll/jrS7aGRpet5qKnJSsGep6xxhMjqT/j8ulFvTMN/kdlMMy5pEKBQ==",
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/@edge-runtime/vm/-/vm-3.2.0.tgz",
+ "integrity": "sha512-0dEVyRLM/lG4gp1R/Ik5bfPl/1wX00xFwd5KcNH602tzBa09oF7pbTKETEhR1GjZ75K6OJnYFu8II2dyMhONMw==",
"dev": true,
"dependencies": {
- "@edge-runtime/primitives": "3.1.0"
+ "@edge-runtime/primitives": "4.1.0"
},
"engines": {
"node": ">=16"
@@ -1562,31 +1571,24 @@
"dev": true
},
"node_modules/@types/react": {
- "version": "18.2.58",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.58.tgz",
- "integrity": "sha512-TaGvMNhxvG2Q0K0aYxiKfNDS5m5ZsoIBBbtfUorxdH4NGSXIlYvZxLJI+9Dd3KjeB3780bciLyAb7ylO8pLhPw==",
+ "version": "18.2.74",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.74.tgz",
+ "integrity": "sha512-9AEqNZZyBx8OdZpxzQlaFEVCSFUM2YXJH46yPOiOpm078k6ZLOCcuAzGum/zK8YBwY+dbahVNbHrbgrAwIRlqw==",
"dev": true,
"dependencies": {
"@types/prop-types": "*",
- "@types/scheduler": "*",
"csstype": "^3.0.2"
}
},
"node_modules/@types/react-dom": {
- "version": "18.2.19",
- "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz",
- "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==",
+ "version": "18.2.23",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.23.tgz",
+ "integrity": "sha512-ZQ71wgGOTmDYpnav2knkjr3qXdAFu0vsk8Ci5w3pGAIdj7/kKAyn+VsQDhXsmzzzepAiI9leWMmubXz690AI/A==",
"dev": true,
"dependencies": {
"@types/react": "*"
}
},
- "node_modules/@types/scheduler": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz",
- "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==",
- "dev": true
- },
"node_modules/@types/stack-utils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz",
@@ -2940,17 +2942,17 @@
"dev": true
},
"node_modules/edge-runtime": {
- "version": "2.5.0",
- "resolved": "https://registry.npmjs.org/edge-runtime/-/edge-runtime-2.5.0.tgz",
- "integrity": "sha512-QgDNX6R+RPwhY3+vqHpvYE4XUoB/cFG60nGBKu9pmPOJxQleeTCj2F5CHimIpNqex9h1Cy2Y3tuQ+Vq2GzmZIA==",
+ "version": "2.5.9",
+ "resolved": "https://registry.npmjs.org/edge-runtime/-/edge-runtime-2.5.9.tgz",
+ "integrity": "sha512-pk+k0oK0PVXdlT4oRp4lwh+unuKB7Ng4iZ2HB+EZ7QCEQizX360Rp/F4aRpgpRgdP2ufB35N+1KppHmYjqIGSg==",
"dev": true,
"dependencies": {
- "@edge-runtime/format": "2.2.0",
- "@edge-runtime/vm": "3.1.0",
+ "@edge-runtime/format": "2.2.1",
+ "@edge-runtime/ponyfill": "2.4.2",
+ "@edge-runtime/vm": "3.2.0",
"async-listen": "3.0.1",
"mri": "1.2.0",
"picocolors": "1.0.0",
- "pretty-bytes": "5.6.0",
"pretty-ms": "7.0.1",
"signal-exit": "4.0.2",
"time-span": "4.0.0"
@@ -6249,9 +6251,9 @@
"dev": true
},
"node_modules/ts-jest": {
- "version": "29.1.1",
- "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.1.tgz",
- "integrity": "sha512-D6xjnnbP17cC85nliwGiL+tpoKN0StpgE0TeOjXQTU6MVCfsB4v7aW05CgQ/1OywGb0x/oy9hHFnN+sczTiRaA==",
+ "version": "29.1.2",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.2.tgz",
+ "integrity": "sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
@@ -6267,7 +6269,7 @@
"ts-jest": "cli.js"
},
"engines": {
- "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ "node": "^16.10.0 || ^18.0.0 || >=20.0.0"
},
"peerDependencies": {
"@babel/core": ">=7.0.0-beta.0 <8",
@@ -6292,9 +6294,9 @@
}
},
"node_modules/ts-jest/node_modules/semver": {
- "version": "7.5.4",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "version": "7.6.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
+ "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dev": true,
"dependencies": {
"lru-cache": "^6.0.0"
diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json
index 171ba9c1a..48223796c 100644
--- a/ecosystem-tests/vercel-edge/package.json
+++ b/ecosystem-tests/vercel-edge/package.json
@@ -21,8 +21,8 @@
},
"devDependencies": {
"@types/node": "20.3.3",
- "@types/react": "18.2.58",
- "@types/react-dom": "18.2.19",
+ "@types/react": "18.2.74",
+ "@types/react-dom": "18.2.23",
"edge-runtime": "^2.4.3",
"fastest-levenshtein": "^1.0.16",
"jest": "^29.5.0",
From c5eb4eaf7f7422bb3f9745d85c908d238d065fb3 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 2 Apr 2024 19:57:39 +0200
Subject: [PATCH 051/533] fix(tests): update wrangler to v3.19.0
(CVE-2023-7080) (#755)
---
.../cloudflare-worker/package-lock.json | 750 ++++--------------
1 file changed, 146 insertions(+), 604 deletions(-)
diff --git a/ecosystem-tests/cloudflare-worker/package-lock.json b/ecosystem-tests/cloudflare-worker/package-lock.json
index dd42f0b36..0673bb27c 100644
--- a/ecosystem-tests/cloudflare-worker/package-lock.json
+++ b/ecosystem-tests/cloudflare-worker/package-lock.json
@@ -671,9 +671,9 @@
}
},
"node_modules/@cloudflare/workerd-darwin-64": {
- "version": "1.20230814.1",
- "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20230814.1.tgz",
- "integrity": "sha512-aQUO7q7qXl+SVtOiMMlVKLNOSeL6GX43RKeflwzsD74dGgyHPiSfw5KCvXhkVbyN7u+yYF6HyFdaIvHLfn5jyA==",
+ "version": "1.20231030.0",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20231030.0.tgz",
+ "integrity": "sha512-J4PQ9utPxLya9yHdMMx3AZeC5M/6FxcoYw6jo9jbDDFTy+a4Gslqf4Im9We3aeOEdPXa3tgQHVQOSelJSZLhIw==",
"cpu": [
"x64"
],
@@ -687,9 +687,9 @@
}
},
"node_modules/@cloudflare/workerd-darwin-arm64": {
- "version": "1.20230814.1",
- "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20230814.1.tgz",
- "integrity": "sha512-U2mcgi+AiuI/4EY5Wk/GmygiNoCNw/V2mcHmxESqe4r6XbJYOzBdEsjnqJ05rqd0JlEM8m64jRtE6/qBnQHygg==",
+ "version": "1.20231030.0",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20231030.0.tgz",
+ "integrity": "sha512-WSJJjm11Del4hSneiNB7wTXGtBXI4QMCH9l5qf4iT5PAW8cESGcCmdHtWDWDtGAAGcvmLT04KNvmum92vRKKQQ==",
"cpu": [
"arm64"
],
@@ -703,9 +703,9 @@
}
},
"node_modules/@cloudflare/workerd-linux-64": {
- "version": "1.20230814.1",
- "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20230814.1.tgz",
- "integrity": "sha512-Q4kITXLTCuG2i2Z01fbb5AjVRRIf3+lS4ZVsFbTbIwtcOOG4Ozcw7ee7tKsFES7hFqR4Eg9gMG4/aS0mmi+L2g==",
+ "version": "1.20231030.0",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20231030.0.tgz",
+ "integrity": "sha512-2HUeRTvoCC17fxE0qdBeR7J9dO8j4A8ZbdcvY8pZxdk+zERU6+N03RTbk/dQMU488PwiDvcC3zZqS4gwLfVT8g==",
"cpu": [
"x64"
],
@@ -719,9 +719,9 @@
}
},
"node_modules/@cloudflare/workerd-linux-arm64": {
- "version": "1.20230814.1",
- "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20230814.1.tgz",
- "integrity": "sha512-BX5SaksXw+pkREVw3Rw2eSNXplqZw+14CcwW/5x/4oq/C6yn5qCvKxJfM7pukJGMI4wkJPOYops7B3g27FB/HA==",
+ "version": "1.20231030.0",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20231030.0.tgz",
+ "integrity": "sha512-4/GK5zHh+9JbUI6Z5xTCM0ZmpKKHk7vu9thmHjUxtz+o8Ne9DoD7DlDvXQWgMF6XGaTubDWyp3ttn+Qv8jDFuQ==",
"cpu": [
"arm64"
],
@@ -735,9 +735,9 @@
}
},
"node_modules/@cloudflare/workerd-windows-64": {
- "version": "1.20230814.1",
- "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20230814.1.tgz",
- "integrity": "sha512-GWHqfyhsG/1wm2W8afkYX3q3fWXUWWD8NGtHfAs6ZVTHdW3mmYyMhKR0lc6ptBwz5i5aXRlP2S+CxxxwwDbKpw==",
+ "version": "1.20231030.0",
+ "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20231030.0.tgz",
+ "integrity": "sha512-fb/Jgj8Yqy3PO1jLhk7mTrHMkR8jklpbQFud6rL/aMAn5d6MQbaSrYOCjzkKGp0Zng8D2LIzSl+Fc0C9Sggxjg==",
"cpu": [
"x64"
],
@@ -757,18 +757,18 @@
"dev": true
},
"node_modules/@esbuild-plugins/node-globals-polyfill": {
- "version": "0.1.1",
- "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-globals-polyfill/-/node-globals-polyfill-0.1.1.tgz",
- "integrity": "sha512-MR0oAA+mlnJWrt1RQVQ+4VYuRJW/P2YmRTv1AsplObyvuBMnPHiizUF95HHYiSsMGLhyGtWufaq2XQg6+iurBg==",
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-globals-polyfill/-/node-globals-polyfill-0.2.3.tgz",
+ "integrity": "sha512-r3MIryXDeXDOZh7ih1l/yE9ZLORCd5e8vWg02azWRGj5SPTuoh69A2AIyn0Z31V/kHBfZ4HgWJ+OK3GTTwLmnw==",
"dev": true,
"peerDependencies": {
"esbuild": "*"
}
},
"node_modules/@esbuild-plugins/node-modules-polyfill": {
- "version": "0.1.4",
- "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-modules-polyfill/-/node-modules-polyfill-0.1.4.tgz",
- "integrity": "sha512-uZbcXi0zbmKC/050p3gJnne5Qdzw8vkXIv+c2BW0Lsc1ji1SkrxbKPUy5Efr0blbTu1SL8w4eyfpnSdPg3G0Qg==",
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-modules-polyfill/-/node-modules-polyfill-0.2.2.tgz",
+ "integrity": "sha512-LXV7QsWJxRuMYvKbiznh+U1ilIop3g2TeKRzUxOG5X3YITc8JyyTa90BmLwqqv0YnX4v32CSlG+vsziZp9dMvA==",
"dev": true,
"dependencies": {
"escape-string-regexp": "^4.0.0",
@@ -791,9 +791,9 @@
}
},
"node_modules/@esbuild/android-arm": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.16.3.tgz",
- "integrity": "sha512-mueuEoh+s1eRbSJqq9KNBQwI4QhQV6sRXIfTyLXSHGMpyew61rOK4qY21uKbXl1iBoMb0AdL1deWFCQVlN2qHA==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.17.19.tgz",
+ "integrity": "sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==",
"cpu": [
"arm"
],
@@ -807,9 +807,9 @@
}
},
"node_modules/@esbuild/android-arm64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.16.3.tgz",
- "integrity": "sha512-RolFVeinkeraDvN/OoRf1F/lP0KUfGNb5jxy/vkIMeRRChkrX/HTYN6TYZosRJs3a1+8wqpxAo5PI5hFmxyPRg==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.17.19.tgz",
+ "integrity": "sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==",
"cpu": [
"arm64"
],
@@ -823,9 +823,9 @@
}
},
"node_modules/@esbuild/android-x64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.16.3.tgz",
- "integrity": "sha512-SFpTUcIT1bIJuCCBMCQWq1bL2gPTjWoLZdjmIhjdcQHaUfV41OQfho6Ici5uvvkMmZRXIUGpM3GxysP/EU7ifQ==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.17.19.tgz",
+ "integrity": "sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==",
"cpu": [
"x64"
],
@@ -839,9 +839,9 @@
}
},
"node_modules/@esbuild/darwin-arm64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.16.3.tgz",
- "integrity": "sha512-DO8WykMyB+N9mIDfI/Hug70Dk1KipavlGAecxS3jDUwAbTpDXj0Lcwzw9svkhxfpCagDmpaTMgxWK8/C/XcXvw==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz",
+ "integrity": "sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==",
"cpu": [
"arm64"
],
@@ -855,9 +855,9 @@
}
},
"node_modules/@esbuild/darwin-x64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.16.3.tgz",
- "integrity": "sha512-uEqZQ2omc6BvWqdCiyZ5+XmxuHEi1SPzpVxXCSSV2+Sh7sbXbpeNhHIeFrIpRjAs0lI1FmA1iIOxFozKBhKgRQ==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.17.19.tgz",
+ "integrity": "sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==",
"cpu": [
"x64"
],
@@ -871,9 +871,9 @@
}
},
"node_modules/@esbuild/freebsd-arm64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.16.3.tgz",
- "integrity": "sha512-nJansp3sSXakNkOD5i5mIz2Is/HjzIhFs49b1tjrPrpCmwgBmH9SSzhC/Z1UqlkivqMYkhfPwMw1dGFUuwmXhw==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.19.tgz",
+ "integrity": "sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==",
"cpu": [
"arm64"
],
@@ -887,9 +887,9 @@
}
},
"node_modules/@esbuild/freebsd-x64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.16.3.tgz",
- "integrity": "sha512-TfoDzLw+QHfc4a8aKtGSQ96Wa+6eimljjkq9HKR0rHlU83vw8aldMOUSJTUDxbcUdcgnJzPaX8/vGWm7vyV7ug==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.17.19.tgz",
+ "integrity": "sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==",
"cpu": [
"x64"
],
@@ -903,9 +903,9 @@
}
},
"node_modules/@esbuild/linux-arm": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.16.3.tgz",
- "integrity": "sha512-VwswmSYwVAAq6LysV59Fyqk3UIjbhuc6wb3vEcJ7HEJUtFuLK9uXWuFoH1lulEbE4+5GjtHi3MHX+w1gNHdOWQ==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.17.19.tgz",
+ "integrity": "sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==",
"cpu": [
"arm"
],
@@ -919,9 +919,9 @@
}
},
"node_modules/@esbuild/linux-arm64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.16.3.tgz",
- "integrity": "sha512-7I3RlsnxEFCHVZNBLb2w7unamgZ5sVwO0/ikE2GaYvYuUQs9Qte/w7TqWcXHtCwxvZx/2+F97ndiUQAWs47ZfQ==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.17.19.tgz",
+ "integrity": "sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==",
"cpu": [
"arm64"
],
@@ -935,9 +935,9 @@
}
},
"node_modules/@esbuild/linux-ia32": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.16.3.tgz",
- "integrity": "sha512-X8FDDxM9cqda2rJE+iblQhIMYY49LfvW4kaEjoFbTTQ4Go8G96Smj2w3BRTwA8IHGoi9dPOPGAX63dhuv19UqA==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.17.19.tgz",
+ "integrity": "sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==",
"cpu": [
"ia32"
],
@@ -951,9 +951,9 @@
}
},
"node_modules/@esbuild/linux-loong64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.16.3.tgz",
- "integrity": "sha512-hIbeejCOyO0X9ujfIIOKjBjNAs9XD/YdJ9JXAy1lHA+8UXuOqbFe4ErMCqMr8dhlMGBuvcQYGF7+kO7waj2KHw==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.17.19.tgz",
+ "integrity": "sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==",
"cpu": [
"loong64"
],
@@ -967,9 +967,9 @@
}
},
"node_modules/@esbuild/linux-mips64el": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.16.3.tgz",
- "integrity": "sha512-znFRzICT/V8VZQMt6rjb21MtAVJv/3dmKRMlohlShrbVXdBuOdDrGb+C2cZGQAR8RFyRe7HS6klmHq103WpmVw==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.17.19.tgz",
+ "integrity": "sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==",
"cpu": [
"mips64el"
],
@@ -983,9 +983,9 @@
}
},
"node_modules/@esbuild/linux-ppc64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.16.3.tgz",
- "integrity": "sha512-EV7LuEybxhXrVTDpbqWF2yehYRNz5e5p+u3oQUS2+ZFpknyi1NXxr8URk4ykR8Efm7iu04//4sBg249yNOwy5Q==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.17.19.tgz",
+ "integrity": "sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==",
"cpu": [
"ppc64"
],
@@ -999,9 +999,9 @@
}
},
"node_modules/@esbuild/linux-riscv64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.16.3.tgz",
- "integrity": "sha512-uDxqFOcLzFIJ+r/pkTTSE9lsCEaV/Y6rMlQjUI9BkzASEChYL/aSQjZjchtEmdnVxDKETnUAmsaZ4pqK1eE5BQ==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.17.19.tgz",
+ "integrity": "sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==",
"cpu": [
"riscv64"
],
@@ -1015,9 +1015,9 @@
}
},
"node_modules/@esbuild/linux-s390x": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.16.3.tgz",
- "integrity": "sha512-NbeREhzSxYwFhnCAQOQZmajsPYtX71Ufej3IQ8W2Gxskfz9DK58ENEju4SbpIj48VenktRASC52N5Fhyf/aliQ==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.17.19.tgz",
+ "integrity": "sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==",
"cpu": [
"s390x"
],
@@ -1031,9 +1031,9 @@
}
},
"node_modules/@esbuild/linux-x64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.16.3.tgz",
- "integrity": "sha512-SDiG0nCixYO9JgpehoKgScwic7vXXndfasjnD5DLbp1xltANzqZ425l7LSdHynt19UWOcDjG9wJJzSElsPvk0w==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.17.19.tgz",
+ "integrity": "sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==",
"cpu": [
"x64"
],
@@ -1047,9 +1047,9 @@
}
},
"node_modules/@esbuild/netbsd-x64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.16.3.tgz",
- "integrity": "sha512-AzbsJqiHEq1I/tUvOfAzCY15h4/7Ivp3ff/o1GpP16n48JMNAtbW0qui2WCgoIZArEHD0SUQ95gvR0oSO7ZbdA==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.17.19.tgz",
+ "integrity": "sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==",
"cpu": [
"x64"
],
@@ -1063,9 +1063,9 @@
}
},
"node_modules/@esbuild/openbsd-x64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.16.3.tgz",
- "integrity": "sha512-gSABi8qHl8k3Cbi/4toAzHiykuBuWLZs43JomTcXkjMZVkp0gj3gg9mO+9HJW/8GB5H89RX/V0QP4JGL7YEEVg==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.17.19.tgz",
+ "integrity": "sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==",
"cpu": [
"x64"
],
@@ -1079,9 +1079,9 @@
}
},
"node_modules/@esbuild/sunos-x64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.16.3.tgz",
- "integrity": "sha512-SF9Kch5Ete4reovvRO6yNjMxrvlfT0F0Flm+NPoUw5Z4Q3r1d23LFTgaLwm3Cp0iGbrU/MoUI+ZqwCv5XJijCw==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.17.19.tgz",
+ "integrity": "sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==",
"cpu": [
"x64"
],
@@ -1095,9 +1095,9 @@
}
},
"node_modules/@esbuild/win32-arm64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.16.3.tgz",
- "integrity": "sha512-u5aBonZIyGopAZyOnoPAA6fGsDeHByZ9CnEzyML9NqntK6D/xl5jteZUKm/p6nD09+v3pTM6TuUIqSPcChk5gg==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.17.19.tgz",
+ "integrity": "sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==",
"cpu": [
"arm64"
],
@@ -1111,9 +1111,9 @@
}
},
"node_modules/@esbuild/win32-ia32": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.16.3.tgz",
- "integrity": "sha512-GlgVq1WpvOEhNioh74TKelwla9KDuAaLZrdxuuUgsP2vayxeLgVc+rbpIv0IYF4+tlIzq2vRhofV+KGLD+37EQ==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.17.19.tgz",
+ "integrity": "sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==",
"cpu": [
"ia32"
],
@@ -1127,9 +1127,9 @@
}
},
"node_modules/@esbuild/win32-x64": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.16.3.tgz",
- "integrity": "sha512-5/JuTd8OWW8UzEtyf19fbrtMJENza+C9JoPIkvItgTBQ1FO2ZLvjbPO6Xs54vk0s5JB5QsfieUEshRQfu7ZHow==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.17.19.tgz",
+ "integrity": "sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==",
"cpu": [
"x64"
],
@@ -1881,37 +1881,6 @@
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true
},
- "node_modules/base64-js": {
- "version": "1.5.1",
- "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
- "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ]
- },
- "node_modules/better-sqlite3": {
- "version": "8.5.2",
- "resolved": "https://registry.npmjs.org/better-sqlite3/-/better-sqlite3-8.5.2.tgz",
- "integrity": "sha512-w/EZ/jwuZF+/47mAVC2+rhR2X/gwkZ+fd1pbX7Y90D5NRaRzDQcxrHY10t6ijGiYIonCVsBSF5v1cay07bP5sg==",
- "dev": true,
- "hasInstallScript": true,
- "dependencies": {
- "bindings": "^1.5.0",
- "prebuild-install": "^7.1.0"
- }
- },
"node_modules/binary-extensions": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
@@ -1921,26 +1890,6 @@
"node": ">=8"
}
},
- "node_modules/bindings": {
- "version": "1.5.0",
- "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz",
- "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==",
- "dev": true,
- "dependencies": {
- "file-uri-to-path": "1.0.0"
- }
- },
- "node_modules/bl": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz",
- "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==",
- "dev": true,
- "dependencies": {
- "buffer": "^5.5.0",
- "inherits": "^2.0.4",
- "readable-stream": "^3.4.0"
- }
- },
"node_modules/blake3-wasm": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/blake3-wasm/-/blake3-wasm-2.1.5.tgz",
@@ -2028,30 +1977,6 @@
"node-int64": "^0.4.0"
}
},
- "node_modules/buffer": {
- "version": "5.7.1",
- "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz",
- "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ],
- "dependencies": {
- "base64-js": "^1.3.1",
- "ieee754": "^1.1.13"
- }
- },
"node_modules/buffer-from": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
@@ -2179,12 +2104,6 @@
"fsevents": "~2.3.2"
}
},
- "node_modules/chownr": {
- "version": "1.1.4",
- "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
- "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==",
- "dev": true
- },
"node_modules/ci-info": {
"version": "3.8.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz",
@@ -2347,21 +2266,6 @@
}
}
},
- "node_modules/decompress-response": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
- "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==",
- "dev": true,
- "dependencies": {
- "mimic-response": "^3.1.0"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
"node_modules/dedent": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz",
@@ -2376,15 +2280,6 @@
}
}
},
- "node_modules/deep-extend": {
- "version": "0.6.0",
- "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
- "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
- "dev": true,
- "engines": {
- "node": ">=4.0.0"
- }
- },
"node_modules/deepmerge": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz",
@@ -2403,15 +2298,6 @@
"node": ">=0.4.0"
}
},
- "node_modules/detect-libc": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz",
- "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==",
- "dev": true,
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/detect-newline": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz",
@@ -2460,15 +2346,6 @@
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
- "node_modules/end-of-stream": {
- "version": "1.4.4",
- "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
- "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
- "dev": true,
- "dependencies": {
- "once": "^1.4.0"
- }
- },
"node_modules/error-ex": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
@@ -2479,9 +2356,9 @@
}
},
"node_modules/esbuild": {
- "version": "0.16.3",
- "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.16.3.tgz",
- "integrity": "sha512-71f7EjPWTiSguen8X/kxEpkAS7BFHwtQKisCDDV3Y4GLGWBaoSCyD5uXkaUew6JDzA9FEN1W23mdnSwW9kqCeg==",
+ "version": "0.17.19",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.17.19.tgz",
+ "integrity": "sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==",
"dev": true,
"hasInstallScript": true,
"bin": {
@@ -2491,28 +2368,28 @@
"node": ">=12"
},
"optionalDependencies": {
- "@esbuild/android-arm": "0.16.3",
- "@esbuild/android-arm64": "0.16.3",
- "@esbuild/android-x64": "0.16.3",
- "@esbuild/darwin-arm64": "0.16.3",
- "@esbuild/darwin-x64": "0.16.3",
- "@esbuild/freebsd-arm64": "0.16.3",
- "@esbuild/freebsd-x64": "0.16.3",
- "@esbuild/linux-arm": "0.16.3",
- "@esbuild/linux-arm64": "0.16.3",
- "@esbuild/linux-ia32": "0.16.3",
- "@esbuild/linux-loong64": "0.16.3",
- "@esbuild/linux-mips64el": "0.16.3",
- "@esbuild/linux-ppc64": "0.16.3",
- "@esbuild/linux-riscv64": "0.16.3",
- "@esbuild/linux-s390x": "0.16.3",
- "@esbuild/linux-x64": "0.16.3",
- "@esbuild/netbsd-x64": "0.16.3",
- "@esbuild/openbsd-x64": "0.16.3",
- "@esbuild/sunos-x64": "0.16.3",
- "@esbuild/win32-arm64": "0.16.3",
- "@esbuild/win32-ia32": "0.16.3",
- "@esbuild/win32-x64": "0.16.3"
+ "@esbuild/android-arm": "0.17.19",
+ "@esbuild/android-arm64": "0.17.19",
+ "@esbuild/android-x64": "0.17.19",
+ "@esbuild/darwin-arm64": "0.17.19",
+ "@esbuild/darwin-x64": "0.17.19",
+ "@esbuild/freebsd-arm64": "0.17.19",
+ "@esbuild/freebsd-x64": "0.17.19",
+ "@esbuild/linux-arm": "0.17.19",
+ "@esbuild/linux-arm64": "0.17.19",
+ "@esbuild/linux-ia32": "0.17.19",
+ "@esbuild/linux-loong64": "0.17.19",
+ "@esbuild/linux-mips64el": "0.17.19",
+ "@esbuild/linux-ppc64": "0.17.19",
+ "@esbuild/linux-riscv64": "0.17.19",
+ "@esbuild/linux-s390x": "0.17.19",
+ "@esbuild/linux-x64": "0.17.19",
+ "@esbuild/netbsd-x64": "0.17.19",
+ "@esbuild/openbsd-x64": "0.17.19",
+ "@esbuild/sunos-x64": "0.17.19",
+ "@esbuild/win32-arm64": "0.17.19",
+ "@esbuild/win32-ia32": "0.17.19",
+ "@esbuild/win32-x64": "0.17.19"
}
},
"node_modules/escalade": {
@@ -2611,15 +2488,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/expand-template": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz",
- "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==",
- "dev": true,
- "engines": {
- "node": ">=6"
- }
- },
"node_modules/expect": {
"version": "29.7.0",
"resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz",
@@ -2682,12 +2550,6 @@
"node": "^12.20 || >= 14.13"
}
},
- "node_modules/file-uri-to-path": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz",
- "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==",
- "dev": true
- },
"node_modules/fill-range": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
@@ -2764,12 +2626,6 @@
"integrity": "sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g==",
"dev": true
},
- "node_modules/fs-constants": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz",
- "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==",
- "dev": true
- },
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
@@ -2854,12 +2710,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/github-from-package": {
- "version": "0.0.0",
- "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz",
- "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==",
- "dev": true
- },
"node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
@@ -2940,12 +2790,6 @@
"integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
"dev": true
},
- "node_modules/http-cache-semantics": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz",
- "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==",
- "dev": true
- },
"node_modules/human-signals": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
@@ -2955,26 +2799,6 @@
"node": ">=10.17.0"
}
},
- "node_modules/ieee754": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
- "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ]
- },
"node_modules/import-local": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz",
@@ -3019,12 +2843,6 @@
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true
},
- "node_modules/ini": {
- "version": "1.3.8",
- "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
- "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
- "dev": true
- },
"node_modules/is-arrayish": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
@@ -4075,54 +3893,32 @@
"node": ">=6"
}
},
- "node_modules/mimic-response": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz",
- "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==",
- "dev": true,
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
"node_modules/miniflare": {
- "version": "3.20230814.1",
- "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-3.20230814.1.tgz",
- "integrity": "sha512-LMgqd1Ut0+fnlvQepVbbBYQczQnyuuap8bgUwOyPETka0S9NR9NxMQSNaBgVZ0uOaG7xMJ/OVTRlz+TGB86PWA==",
+ "version": "3.20231030.3",
+ "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-3.20231030.3.tgz",
+ "integrity": "sha512-lquHSh0XiO8uoWDujOLHtDS9mkUTJTc5C5amiQ6A++5y0f+DWiMqbDBvvwjlYf4Dvqk6ChFya9dztk7fg2ZVxA==",
"dev": true,
"dependencies": {
"acorn": "^8.8.0",
"acorn-walk": "^8.2.0",
- "better-sqlite3": "^8.1.0",
"capnp-ts": "^0.7.0",
"exit-hook": "^2.2.1",
"glob-to-regexp": "^0.4.1",
- "http-cache-semantics": "^4.1.0",
- "kleur": "^4.1.5",
- "set-cookie-parser": "^2.6.0",
"source-map-support": "0.5.21",
"stoppable": "^1.1.0",
- "undici": "^5.13.0",
- "workerd": "1.20230814.1",
+ "undici": "^5.22.1",
+ "workerd": "1.20231030.0",
"ws": "^8.11.0",
"youch": "^3.2.2",
"zod": "^3.20.6"
},
+ "bin": {
+ "miniflare": "bootstrap.js"
+ },
"engines": {
"node": ">=16.13"
}
},
- "node_modules/miniflare/node_modules/kleur": {
- "version": "4.1.5",
- "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz",
- "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==",
- "dev": true,
- "engines": {
- "node": ">=6"
- }
- },
"node_modules/miniflare/node_modules/source-map-support": {
"version": "0.5.21",
"resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
@@ -4154,12 +3950,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/mkdirp-classic": {
- "version": "0.5.3",
- "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz",
- "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==",
- "dev": true
- },
"node_modules/ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
@@ -4193,63 +3983,12 @@
"node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
}
},
- "node_modules/napi-build-utils": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz",
- "integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg==",
- "dev": true
- },
"node_modules/natural-compare": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
"integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
"dev": true
},
- "node_modules/node-abi": {
- "version": "3.47.0",
- "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.47.0.tgz",
- "integrity": "sha512-2s6B2CWZM//kPgwnuI0KrYwNjfdByE25zvAaEpq9IH4zcNsarH8Ihu/UuX6XMPEogDAxkuUFeZn60pXNHAqn3A==",
- "dev": true,
- "dependencies": {
- "semver": "^7.3.5"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/node-abi/node_modules/lru-cache": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
- "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
- "dev": true,
- "dependencies": {
- "yallist": "^4.0.0"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/node-abi/node_modules/semver": {
- "version": "7.5.4",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
- "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
- "dev": true,
- "dependencies": {
- "lru-cache": "^6.0.0"
- },
- "bin": {
- "semver": "bin/semver.js"
- },
- "engines": {
- "node": ">=10"
- }
- },
- "node_modules/node-abi/node_modules/yallist": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
- "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
- "dev": true
- },
"node_modules/node-domexception": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
@@ -4507,32 +4246,6 @@
"node": ">=8"
}
},
- "node_modules/prebuild-install": {
- "version": "7.1.1",
- "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz",
- "integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==",
- "dev": true,
- "dependencies": {
- "detect-libc": "^2.0.0",
- "expand-template": "^2.0.3",
- "github-from-package": "0.0.0",
- "minimist": "^1.2.3",
- "mkdirp-classic": "^0.5.3",
- "napi-build-utils": "^1.0.1",
- "node-abi": "^3.3.0",
- "pump": "^3.0.0",
- "rc": "^1.2.7",
- "simple-get": "^4.0.0",
- "tar-fs": "^2.0.0",
- "tunnel-agent": "^0.6.0"
- },
- "bin": {
- "prebuild-install": "bin.js"
- },
- "engines": {
- "node": ">=10"
- }
- },
"node_modules/pretty-format": {
"version": "29.7.0",
"resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",
@@ -4599,16 +4312,6 @@
"node": ">= 0.10"
}
},
- "node_modules/pump": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
- "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
- "dev": true,
- "dependencies": {
- "end-of-stream": "^1.1.0",
- "once": "^1.3.1"
- }
- },
"node_modules/pure-rand": {
"version": "6.0.4",
"resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.4.tgz",
@@ -4625,50 +4328,12 @@
}
]
},
- "node_modules/rc": {
- "version": "1.2.8",
- "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
- "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
- "dev": true,
- "dependencies": {
- "deep-extend": "^0.6.0",
- "ini": "~1.3.0",
- "minimist": "^1.2.0",
- "strip-json-comments": "~2.0.1"
- },
- "bin": {
- "rc": "cli.js"
- }
- },
- "node_modules/rc/node_modules/strip-json-comments": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
- "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==",
- "dev": true,
- "engines": {
- "node": ">=0.10.0"
- }
- },
"node_modules/react-is": {
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz",
"integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==",
"dev": true
},
- "node_modules/readable-stream": {
- "version": "3.6.2",
- "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
- "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
- "dev": true,
- "dependencies": {
- "inherits": "^2.0.3",
- "string_decoder": "^1.1.1",
- "util-deprecate": "^1.0.1"
- },
- "engines": {
- "node": ">= 6"
- }
- },
"node_modules/readdirp": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
@@ -4776,26 +4441,6 @@
"tslib": "^2.1.0"
}
},
- "node_modules/safe-buffer": {
- "version": "5.2.1",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
- "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ]
- },
"node_modules/selfsigned": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.1.1.tgz",
@@ -4817,12 +4462,6 @@
"semver": "bin/semver.js"
}
},
- "node_modules/set-cookie-parser": {
- "version": "2.6.0",
- "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz",
- "integrity": "sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==",
- "dev": true
- },
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
@@ -4850,51 +4489,6 @@
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
"dev": true
},
- "node_modules/simple-concat": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz",
- "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ]
- },
- "node_modules/simple-get": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz",
- "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/feross"
- },
- {
- "type": "patreon",
- "url": "https://www.patreon.com/feross"
- },
- {
- "type": "consulting",
- "url": "https://feross.org/support"
- }
- ],
- "dependencies": {
- "decompress-response": "^6.0.0",
- "once": "^1.3.1",
- "simple-concat": "^1.0.0"
- }
- },
"node_modules/sisteransi": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
@@ -5028,15 +4622,6 @@
"node": ">=10.0.0"
}
},
- "node_modules/string_decoder": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
- "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
- "dev": true,
- "dependencies": {
- "safe-buffer": "~5.2.0"
- }
- },
"node_modules/string-length": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",
@@ -5130,34 +4715,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/tar-fs": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz",
- "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==",
- "dev": true,
- "dependencies": {
- "chownr": "^1.1.1",
- "mkdirp-classic": "^0.5.2",
- "pump": "^3.0.0",
- "tar-stream": "^2.1.4"
- }
- },
- "node_modules/tar-stream": {
- "version": "2.2.0",
- "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz",
- "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==",
- "dev": true,
- "dependencies": {
- "bl": "^4.0.3",
- "end-of-stream": "^1.4.1",
- "fs-constants": "^1.0.0",
- "inherits": "^2.0.3",
- "readable-stream": "^3.1.1"
- },
- "engines": {
- "node": ">=6"
- }
- },
"node_modules/test-exclude": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",
@@ -5287,18 +4844,6 @@
"integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==",
"dev": true
},
- "node_modules/tunnel-agent": {
- "version": "0.6.0",
- "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
- "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==",
- "dev": true,
- "dependencies": {
- "safe-buffer": "^5.0.1"
- },
- "engines": {
- "node": "*"
- }
- },
"node_modules/type-detect": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
@@ -5375,12 +4920,6 @@
"browserslist": ">= 4.21.0"
}
},
- "node_modules/util-deprecate": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
- "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
- "dev": true
- },
"node_modules/v8-to-istanbul": {
"version": "9.2.0",
"resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz",
@@ -5447,9 +4986,9 @@
}
},
"node_modules/workerd": {
- "version": "1.20230814.1",
- "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20230814.1.tgz",
- "integrity": "sha512-zJeSEteXuAD+bpYJT8WvzTAHvIAkKPVxOV+Jy6zCLKz5e08N3OUbAF+wrvGWc8b2aB1sj+IYsdXfkv4puH+qXQ==",
+ "version": "1.20231030.0",
+ "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20231030.0.tgz",
+ "integrity": "sha512-+FSW+d31f8RrjHanFf/R9A+Z0csf3OtsvzdPmAKuwuZm/5HrBv83cvG9fFeTxl7/nI6irUUXIRF9xcj/NomQzQ==",
"dev": true,
"hasInstallScript": true,
"bin": {
@@ -5459,30 +4998,32 @@
"node": ">=16"
},
"optionalDependencies": {
- "@cloudflare/workerd-darwin-64": "1.20230814.1",
- "@cloudflare/workerd-darwin-arm64": "1.20230814.1",
- "@cloudflare/workerd-linux-64": "1.20230814.1",
- "@cloudflare/workerd-linux-arm64": "1.20230814.1",
- "@cloudflare/workerd-windows-64": "1.20230814.1"
+ "@cloudflare/workerd-darwin-64": "1.20231030.0",
+ "@cloudflare/workerd-darwin-arm64": "1.20231030.0",
+ "@cloudflare/workerd-linux-64": "1.20231030.0",
+ "@cloudflare/workerd-linux-arm64": "1.20231030.0",
+ "@cloudflare/workerd-windows-64": "1.20231030.0"
}
},
"node_modules/wrangler": {
- "version": "3.6.0",
- "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.6.0.tgz",
- "integrity": "sha512-GWs4+gIUK+086svW/TgFhhxxrl/hdW2L7WASbdc10dJT7yFmCXse0SnHiqWUxbFu3ScP2t3a3LszJ08wwolWHg==",
+ "version": "3.19.0",
+ "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.19.0.tgz",
+ "integrity": "sha512-pY7xWqkQn6DJ+1vz9YHz2pCftEmK+JCTj9sqnucp0NZnlUiILDmBWegsjjCLZycgfiA62J213N7NvjLPr2LB8w==",
"dev": true,
"dependencies": {
"@cloudflare/kv-asset-handler": "^0.2.0",
- "@esbuild-plugins/node-globals-polyfill": "^0.1.1",
- "@esbuild-plugins/node-modules-polyfill": "^0.1.4",
+ "@esbuild-plugins/node-globals-polyfill": "^0.2.3",
+ "@esbuild-plugins/node-modules-polyfill": "^0.2.2",
"blake3-wasm": "^2.1.5",
"chokidar": "^3.5.3",
- "esbuild": "0.16.3",
- "miniflare": "3.20230814.1",
+ "esbuild": "0.17.19",
+ "miniflare": "3.20231030.3",
"nanoid": "^3.3.3",
"path-to-regexp": "^6.2.0",
+ "resolve.exports": "^2.0.2",
"selfsigned": "^2.0.1",
- "source-map": "^0.7.4",
+ "source-map": "0.6.1",
+ "source-map-support": "0.5.21",
"xxhash-wasm": "^1.0.1"
},
"bin": {
@@ -5490,19 +5031,20 @@
"wrangler2": "bin/wrangler.js"
},
"engines": {
- "node": ">=16.13.0"
+ "node": ">=16.17.0"
},
"optionalDependencies": {
"fsevents": "~2.3.2"
}
},
- "node_modules/wrangler/node_modules/source-map": {
- "version": "0.7.4",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz",
- "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==",
+ "node_modules/wrangler/node_modules/source-map-support": {
+ "version": "0.5.21",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
+ "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
"dev": true,
- "engines": {
- "node": ">= 8"
+ "dependencies": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
}
},
"node_modules/wrap-ansi": {
From e2d5d2bcac0aaa48c98931381a1fdc53c16c73f0 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 4 Apr 2024 07:32:06 -0400
Subject: [PATCH 052/533] fix(streaming): handle special line characters and
fix multi-byte character decoding (#757)
---
src/streaming.ts | 120 +++++++++++++++-----
tests/streaming.test.ts | 245 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 338 insertions(+), 27 deletions(-)
diff --git a/src/streaming.ts b/src/streaming.ts
index 6b0f2a345..722a8f69c 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -23,29 +23,6 @@ export class Stream- implements AsyncIterable
- {
static fromSSEResponse
- (response: Response, controller: AbortController) {
let consumed = false;
- const decoder = new SSEDecoder();
-
- async function* iterMessages(): AsyncGenerator {
- if (!response.body) {
- controller.abort();
- throw new OpenAIError(`Attempted to iterate over a response with no body`);
- }
-
- const lineDecoder = new LineDecoder();
-
- const iter = readableStreamAsyncIterable(response.body);
- for await (const chunk of iter) {
- for (const line of lineDecoder.decode(chunk)) {
- const sse = decoder.decode(line);
- if (sse) yield sse;
- }
- }
-
- for (const line of lineDecoder.flush()) {
- const sse = decoder.decode(line);
- if (sse) yield sse;
- }
- }
async function* iterator(): AsyncIterator
- {
if (consumed) {
@@ -54,7 +31,7 @@ export class Stream
- implements AsyncIterable
- {
consumed = true;
let done = false;
try {
- for await (const sse of iterMessages()) {
+ for await (const sse of _iterSSEMessages(response, controller)) {
if (done) continue;
if (sse.data.startsWith('[DONE]')) {
@@ -220,6 +197,97 @@ export class Stream
- implements AsyncIterable
- {
}
}
+export async function* _iterSSEMessages(
+ response: Response,
+ controller: AbortController,
+): AsyncGenerator {
+ if (!response.body) {
+ controller.abort();
+ throw new OpenAIError(`Attempted to iterate over a response with no body`);
+ }
+
+ const sseDecoder = new SSEDecoder();
+ const lineDecoder = new LineDecoder();
+
+ const iter = readableStreamAsyncIterable(response.body);
+ for await (const sseChunk of iterSSEChunks(iter)) {
+ for (const line of lineDecoder.decode(sseChunk)) {
+ const sse = sseDecoder.decode(line);
+ if (sse) yield sse;
+ }
+ }
+
+ for (const line of lineDecoder.flush()) {
+ const sse = sseDecoder.decode(line);
+ if (sse) yield sse;
+ }
+}
+
+/**
+ * Given an async iterable iterator, iterates over it and yields full
+ * SSE chunks, i.e. yields when a double new-line is encountered.
+ */
+async function* iterSSEChunks(iterator: AsyncIterableIterator): AsyncGenerator {
+ let data = new Uint8Array();
+
+ for await (const chunk of iterator) {
+ if (chunk == null) {
+ continue;
+ }
+
+ const binaryChunk =
+ chunk instanceof ArrayBuffer ? new Uint8Array(chunk)
+ : typeof chunk === 'string' ? new TextEncoder().encode(chunk)
+ : chunk;
+
+ let newData = new Uint8Array(data.length + binaryChunk.length);
+ newData.set(data);
+ newData.set(binaryChunk, data.length);
+ data = newData;
+
+ let patternIndex;
+ while ((patternIndex = findDoubleNewlineIndex(data)) !== -1) {
+ yield data.slice(0, patternIndex);
+ data = data.slice(patternIndex);
+ }
+ }
+
+ if (data.length > 0) {
+ yield data;
+ }
+}
+
+function findDoubleNewlineIndex(buffer: Uint8Array): number {
+ // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n)
+ // and returns the index right after the first occurrence of any pattern,
+ // or -1 if none of the patterns are found.
+ const newline = 0x0a; // \n
+ const carriage = 0x0d; // \r
+
+ for (let i = 0; i < buffer.length - 2; i++) {
+ if (buffer[i] === newline && buffer[i + 1] === newline) {
+ // \n\n
+ return i + 2;
+ }
+ if (buffer[i] === carriage && buffer[i + 1] === carriage) {
+ // \r\r
+ return i + 2;
+ }
+ if (
+ buffer[i] === carriage &&
+ buffer[i + 1] === newline &&
+ i + 3 < buffer.length &&
+ buffer[i + 2] === carriage &&
+ buffer[i + 3] === newline
+ ) {
+ // \r\n\r\n
+ return i + 4;
+ }
+ }
+
+ return -1;
+}
+
class SSEDecoder {
private data: string[];
private event: string | null;
@@ -283,8 +351,8 @@ class SSEDecoder {
*/
class LineDecoder {
// prettier-ignore
- static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']);
- static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g;
+ static NEWLINE_CHARS = new Set(['\n', '\r']);
+ static NEWLINE_REGEXP = /\r\n|[\n\r]/g;
buffer: string[];
trailingCR: boolean;
diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts
index 479b2a341..6fe9a5781 100644
--- a/tests/streaming.test.ts
+++ b/tests/streaming.test.ts
@@ -1,4 +1,7 @@
-import { _decodeChunks as decodeChunks } from 'openai/streaming';
+import { Response } from 'node-fetch';
+import { PassThrough } from 'stream';
+import assert from 'assert';
+import { _iterSSEMessages, _decodeChunks as decodeChunks } from 'openai/streaming';
describe('line decoder', () => {
test('basic', () => {
@@ -27,3 +30,243 @@ describe('line decoder', () => {
expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']);
});
});
+
+describe('streaming decoding', () => {
+ test('basic', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('event: completion\n');
+ yield Buffer.from('data: {"foo":true}\n');
+ yield Buffer.from('\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(JSON.parse(event.value.data)).toEqual({ foo: true });
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+
+ test('data without event', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('data: {"foo":true}\n');
+ yield Buffer.from('\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toBeNull();
+ expect(JSON.parse(event.value.data)).toEqual({ foo: true });
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+
+ test('event without data', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('event: foo\n');
+ yield Buffer.from('\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toEqual('foo');
+ expect(event.value.data).toEqual('');
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+
+ test('multiple events', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('event: foo\n');
+ yield Buffer.from('\n');
+ yield Buffer.from('event: ping\n');
+ yield Buffer.from('\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toEqual('foo');
+ expect(event.value.data).toEqual('');
+
+ event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toEqual('ping');
+ expect(event.value.data).toEqual('');
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+
+ test('multiple events with data', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('event: foo\n');
+ yield Buffer.from('data: {"foo":true}\n');
+ yield Buffer.from('\n');
+ yield Buffer.from('event: ping\n');
+ yield Buffer.from('data: {"bar":false}\n');
+ yield Buffer.from('\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toEqual('foo');
+ expect(JSON.parse(event.value.data)).toEqual({ foo: true });
+
+ event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toEqual('ping');
+ expect(JSON.parse(event.value.data)).toEqual({ bar: false });
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+
+ test('multiple data lines with empty line', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('event: ping\n');
+ yield Buffer.from('data: {\n');
+ yield Buffer.from('data: "foo":\n');
+ yield Buffer.from('data: \n');
+ yield Buffer.from('data:\n');
+ yield Buffer.from('data: true}\n');
+ yield Buffer.from('\n\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toEqual('ping');
+ expect(JSON.parse(event.value.data)).toEqual({ foo: true });
+ expect(event.value.data).toEqual('{\n"foo":\n\n\ntrue}');
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+
+ test('data json escaped double new line', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('event: ping\n');
+ yield Buffer.from('data: {"foo": "my long\\n\\ncontent"}');
+ yield Buffer.from('\n\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toEqual('ping');
+ expect(JSON.parse(event.value.data)).toEqual({ foo: 'my long\n\ncontent' });
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+
+ test('special new line characters', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('data: {"content": "culpa "}\n');
+ yield Buffer.from('\n');
+ yield Buffer.from('data: {"content": "');
+ yield Buffer.from([0xe2, 0x80, 0xa8]);
+ yield Buffer.from('"}\n');
+ yield Buffer.from('\n');
+ yield Buffer.from('data: {"content": "foo"}\n');
+ yield Buffer.from('\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(JSON.parse(event.value.data)).toEqual({ content: 'culpa ' });
+
+ event = await stream.next();
+ assert(event.value);
+ expect(JSON.parse(event.value.data)).toEqual({ content: Buffer.from([0xe2, 0x80, 0xa8]).toString() });
+
+ event = await stream.next();
+ assert(event.value);
+ expect(JSON.parse(event.value.data)).toEqual({ content: 'foo' });
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+
+ test('multi-byte characters across chunks', async () => {
+ async function* body(): AsyncGenerator {
+ yield Buffer.from('event: completion\n');
+ yield Buffer.from('data: {"content": "');
+ // bytes taken from the string 'известни' and arbitrarily split
+ // so that some multi-byte characters span multiple chunks
+ yield Buffer.from([0xd0]);
+ yield Buffer.from([0xb8, 0xd0, 0xb7, 0xd0]);
+ yield Buffer.from([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8]);
+ yield Buffer.from('"}\n');
+ yield Buffer.from('\n');
+ }
+
+ const stream = _iterSSEMessages(new Response(await iteratorToStream(body())), new AbortController())[
+ Symbol.asyncIterator
+ ]();
+
+ let event = await stream.next();
+ assert(event.value);
+ expect(event.value.event).toEqual('completion');
+ expect(JSON.parse(event.value.data)).toEqual({ content: 'известни' });
+
+ event = await stream.next();
+ expect(event.done).toBeTruthy();
+ });
+});
+
+async function iteratorToStream(iterator: AsyncGenerator): Promise {
+ const parts: unknown[] = [];
+
+ for await (const chunk of iterator) {
+ parts.push(chunk);
+ }
+
+ let index = 0;
+
+ const stream = new PassThrough({
+ read() {
+ const value = parts[index];
+ if (value === undefined) {
+ stream.end();
+ } else {
+ index += 1;
+ stream.write(value);
+ }
+ },
+ });
+
+ return stream;
+}
From 4999e9b691965c31c8979c5ce32fdb75c577dcf9 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 4 Apr 2024 07:32:26 -0400
Subject: [PATCH 053/533] release: 4.32.2
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 14 ++++++++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 27308d159..d6b720422 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.32.1"
+ ".": "4.32.2"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a1702ad3b..22748a5bd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## 4.32.2 (2024-04-04)
+
+Full Changelog: [v4.32.1...v4.32.2](https://github.com/openai/openai-node/compare/v4.32.1...v4.32.2)
+
+### Bug Fixes
+
+* **streaming:** handle special line characters and fix multi-byte character decoding ([#757](https://github.com/openai/openai-node/issues/757)) ([8dcdda2](https://github.com/openai/openai-node/commit/8dcdda2b0d1d86486eea5fd47d24a8d26fde4c19))
+* **tests:** update wrangler to v3.19.0 (CVE-2023-7080) ([#755](https://github.com/openai/openai-node/issues/755)) ([47ca41d](https://github.com/openai/openai-node/commit/47ca41da9a739b2e04b721cb1fe843e5dd152465))
+
+
+### Chores
+
+* **tests:** bump ecosystem tests dependencies ([#753](https://github.com/openai/openai-node/issues/753)) ([3f86ea2](https://github.com/openai/openai-node/commit/3f86ea2205c90e05bcbe582491a4bed01075a5b1))
+
## 4.32.1 (2024-04-02)
Full Changelog: [v4.32.0...v4.32.1](https://github.com/openai/openai-node/compare/v4.32.0...v4.32.1)
diff --git a/README.md b/README.md
index aae0367b6..ba4b69838 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.32.1/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.32.2/mod.ts';
```
diff --git a/build-deno b/build-deno
index a56b6af13..8d0ee6da9 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.32.1/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.32.2/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 4d87ed952..3d0107223 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.32.1",
+ "version": "4.32.2",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index c2e5453c3..ecc4c1a71 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.32.1'; // x-release-please-version
+export const VERSION = '4.32.2'; // x-release-please-version
From 4f38d4df907fe99f3757da6b58b422b4e663e67c Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 5 Apr 2024 08:36:22 -0400
Subject: [PATCH 054/533] feat(api): add additional messages when creating
thread run (#759)
---
src/resources/beta/threads/runs/runs.ts | 158 ++++++++++++++++++
.../beta/threads/runs/runs.test.ts | 5 +
2 files changed, 163 insertions(+)
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 5dfc7d595..04234a74f 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -529,6 +529,11 @@ export interface RunCreateParamsBase {
*/
additional_instructions?: string | null;
+ /**
+ * Adds additional messages to the thread before creating the run.
+ */
+ additional_messages?: Array | null;
+
/**
* Overrides the
* [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
@@ -574,6 +579,39 @@ export interface RunCreateParamsBase {
}
export namespace RunCreateParams {
+ export interface AdditionalMessage {
+ /**
+ * The content of the message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
+ */
+ role: 'user' | 'assistant';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the message should use. There can be a maximum of 10 files attached to a
+ * message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+
export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
}
@@ -637,6 +675,11 @@ export interface RunCreateAndPollParams {
*/
additional_instructions?: string | null;
+ /**
+ * Adds additional messages to the thread before creating the run.
+ */
+ additional_messages?: Array | null;
+
/**
* Overrides the
* [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
@@ -674,6 +717,41 @@ export interface RunCreateAndPollParams {
tools?: Array | null;
}
+export namespace RunCreateAndPollParams {
+ export interface AdditionalMessage {
+ /**
+ * The content of the message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
+ */
+ role: 'user' | 'assistant';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the message should use. There can be a maximum of 10 files attached to a
+ * message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+}
+
export interface RunCreateAndStreamParams {
/**
* The ID of the
@@ -689,6 +767,11 @@ export interface RunCreateAndStreamParams {
*/
additional_instructions?: string | null;
+ /**
+ * Adds additional messages to the thread before creating the run.
+ */
+ additional_messages?: Array | null;
+
/**
* Overrides the
* [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
@@ -726,6 +809,41 @@ export interface RunCreateAndStreamParams {
tools?: Array | null;
}
+export namespace RunCreateAndStreamParams {
+ export interface AdditionalMessage {
+ /**
+ * The content of the message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
+ */
+ role: 'user' | 'assistant';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the message should use. There can be a maximum of 10 files attached to a
+ * message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+}
+
export interface RunStreamParams {
/**
* The ID of the
@@ -741,6 +859,11 @@ export interface RunStreamParams {
*/
additional_instructions?: string | null;
+ /**
+ * Adds additional messages to the thread before creating the run.
+ */
+ additional_messages?: Array | null;
+
/**
* Overrides the
* [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
@@ -778,6 +901,41 @@ export interface RunStreamParams {
tools?: Array | null;
}
+export namespace RunStreamParams {
+ export interface AdditionalMessage {
+ /**
+ * The content of the message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
+ */
+ role: 'user' | 'assistant';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the message should use. There can be a maximum of 10 files attached to a
+ * message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+}
+
export type RunSubmitToolOutputsParams =
| RunSubmitToolOutputsParamsNonStreaming
| RunSubmitToolOutputsParamsStreaming;
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
index 5f17c1b58..2911cfd53 100644
--- a/tests/api-resources/beta/threads/runs/runs.test.ts
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -24,6 +24,11 @@ describe('resource runs', () => {
const response = await openai.beta.threads.runs.create('string', {
assistant_id: 'string',
additional_instructions: 'string',
+ additional_messages: [
+ { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ { role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
+ ],
instructions: 'string',
metadata: {},
model: 'string',
From 018ac718ccf6a96798ef8f91906b3b652aa50919 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 5 Apr 2024 08:36:43 -0400
Subject: [PATCH 055/533] release: 4.33.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 8 ++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index d6b720422..e5b450ff3 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.32.2"
+ ".": "4.33.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 22748a5bd..f865d94f7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 4.33.0 (2024-04-05)
+
+Full Changelog: [v4.32.2...v4.33.0](https://github.com/openai/openai-node/compare/v4.32.2...v4.33.0)
+
+### Features
+
+* **api:** add additional messages when creating thread run ([#759](https://github.com/openai/openai-node/issues/759)) ([f1fdb41](https://github.com/openai/openai-node/commit/f1fdb410e087f9b94faeda0558de573ec1118601))
+
## 4.32.2 (2024-04-04)
Full Changelog: [v4.32.1...v4.32.2](https://github.com/openai/openai-node/compare/v4.32.1...v4.32.2)
diff --git a/README.md b/README.md
index ba4b69838..62c8967c6 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.32.2/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.33.0/mod.ts';
```
diff --git a/build-deno b/build-deno
index 8d0ee6da9..bbe96faae 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.32.2/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.33.0/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 3d0107223..490a9e492 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.32.2",
+ "version": "4.33.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index ecc4c1a71..6726dc21c 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.32.2'; // x-release-please-version
+export const VERSION = '4.33.0'; // x-release-please-version
From fcf748dbbd23f972ff9fd81a8b2a35232a2d6e5c Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 11 Apr 2024 12:12:38 -0400
Subject: [PATCH 056/533] chore(internal): improve ecosystem tests (#761)
---
.gitignore | 5 +
.prettierignore | 2 +-
ecosystem-tests/cli.ts | 226 +++++++++++++++++++++++----
ecosystem-tests/deno/deno.jsonc | 4 +
ecosystem-tests/deno/deno.lock | 32 ++--
ecosystem-tests/deno/import_map.json | 6 -
ecosystem-tests/deno/main_test.ts | 5 +-
7 files changed, 229 insertions(+), 51 deletions(-)
delete mode 100644 ecosystem-tests/deno/import_map.json
diff --git a/.gitignore b/.gitignore
index 58b3944a1..31b12ac63 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,8 @@ dist
/deno
/*.tgz
.idea/
+tmp
+.pack
+ecosystem-tests/deno/package.json
+ecosystem-tests/*/openai.tgz
+
diff --git a/.prettierignore b/.prettierignore
index fc6160fb1..3548c5af9 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -1,5 +1,5 @@
CHANGELOG.md
-/ecosystem-tests
+/ecosystem-tests/*/**
/node_modules
/deno
diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts
index c84c479d4..a3c1f27a4 100644
--- a/ecosystem-tests/cli.ts
+++ b/ecosystem-tests/cli.ts
@@ -5,16 +5,19 @@ import assert from 'assert';
import path from 'path';
const TAR_NAME = 'openai.tgz';
-const PACK_FILE = `.pack/${TAR_NAME}`;
+const PACK_FOLDER = '.pack';
+const PACK_FILE = `${PACK_FOLDER}/${TAR_NAME}`;
const IS_CI = Boolean(process.env['CI'] && process.env['CI'] !== 'false');
async function defaultNodeRunner() {
await installPackage();
await run('npm', ['run', 'tsc']);
- if (state.live) await run('npm', ['test']);
+ if (state.live) {
+ await run('npm', ['test']);
+ }
}
-const projects = {
+const projectRunners = {
'node-ts-cjs': defaultNodeRunner,
'node-ts-cjs-web': defaultNodeRunner,
'node-ts-cjs-auto': defaultNodeRunner,
@@ -76,30 +79,17 @@ const projects = {
}
},
deno: async () => {
+ // we don't need to explicitly install the package here
+ // because our deno setup relies on `rootDir/deno` to exist
+ // which is an artifact produced from our build process
await run('deno', ['task', 'install']);
- await installPackage();
- const packFile = getPackFile();
-
- const openaiDir = path.resolve(
- process.cwd(),
- 'node_modules',
- '.deno',
- 'openai@3.3.0',
- 'node_modules',
- 'openai',
- );
-
- await run('sh', ['-c', 'rm -rf *'], { cwd: openaiDir, stdio: 'inherit' });
- await run('tar', ['xzf', path.resolve(packFile)], { cwd: openaiDir, stdio: 'inherit' });
- await run('sh', ['-c', 'mv package/* .'], { cwd: openaiDir, stdio: 'inherit' });
- await run('sh', ['-c', 'rm -rf package'], { cwd: openaiDir, stdio: 'inherit' });
-
await run('deno', ['task', 'check']);
+
if (state.live) await run('deno', ['task', 'test']);
},
};
-const projectNames = Object.keys(projects) as Array;
+let projectNames = Object.keys(projectRunners) as Array;
const projectNamesSet = new Set(projectNames);
function parseArgs() {
@@ -118,6 +108,11 @@ function parseArgs() {
type: 'boolean',
default: false,
},
+ skip: {
+ type: 'array',
+ default: [],
+ description: 'Skip one or more projects. Separate project names with a space.',
+ },
skipPack: {
type: 'boolean',
default: false,
@@ -156,6 +151,10 @@ function parseArgs() {
default: false,
description: 'run all projects in parallel (jobs = # projects)',
},
+ noCleanup: {
+ type: 'boolean',
+ default: false,
+ },
})
.help().argv;
}
@@ -165,9 +164,32 @@ type Args = Awaited>;
let state: Args & { rootDir: string };
async function main() {
+ if (!process.env['OPENAI_API_KEY']) {
+ console.error(`Error: The environment variable OPENAI_API_KEY must be set. Run the command
+ $echo 'OPENAI_API_KEY = "'"\${OPENAI_API_KEY}"'"' >> ecosystem-tests/cloudflare-worker/wrangler.toml`);
+ process.exit(0);
+ }
+
const args = (await parseArgs()) as Args;
console.error(`args:`, args);
+ // Some projects, e.g. Deno can be slow to run, so offer the option to skip them. Example:
+ // --skip=deno node-ts-cjs
+ if (args.skip.length > 0) {
+ args.skip.forEach((projectName, idx) => {
+ // Ensure the inputted project name is lower case
+ args.skip[idx] = (projectName + '').toLowerCase();
+ });
+
+ projectNames = projectNames.filter((projectName) => (args.skip as string[]).indexOf(projectName) < 0);
+
+ args.skip.forEach((projectName) => {
+ projectNamesSet.delete(projectName as any);
+ });
+ }
+
+ const tmpFolderPath = path.resolve(process.cwd(), 'tmp');
+
const rootDir = await packageDir();
console.error(`rootDir:`, rootDir);
@@ -191,8 +213,63 @@ async function main() {
const failed: typeof projectNames = [];
+ let cleanupWasRun = false;
+
+ // Cleanup the various artifacts created as part of executing this script
+ async function runCleanup() {
+ if (cleanupWasRun) {
+ return;
+ }
+ cleanupWasRun = true;
+
+ // Restore the original files in the ecosystem-tests folders from before
+ // npm install was run
+ await fileCache.restoreFiles(tmpFolderPath);
+
+ const packFolderPath = path.join(process.cwd(), PACK_FOLDER);
+
+ try {
+ // Clean up the .pack folder if this was the process that created it.
+ await fs.unlink(PACK_FILE);
+ await fs.rmdir(packFolderPath);
+ } catch (err) {
+ console.log('Failed to delete .pack folder', err);
+ }
+
+ for (let i = 0; i < projectNames.length; i++) {
+ const projectName = (projectNames as any)[i] as string;
+
+ await defaultNodeCleanup(projectName).catch((err: any) => {
+ console.error('Error: Cleanup of file artifacts failed for project', projectName, err);
+ });
+ }
+ }
+
+ async function runCleanupAndExit() {
+ await runCleanup();
+
+ process.exit(1);
+ }
+
+ if (!(await fileExists(tmpFolderPath))) {
+ await fs.mkdir(tmpFolderPath);
+ }
+
let { jobs } = args;
- if (args.parallel) jobs = projectsToRun.length;
+ if (args.parallel) {
+ jobs = projectsToRun.length;
+ }
+
+ if (!args.noCleanup) {
+ // The cleanup code is only executed from the parent script that runs
+ // multiple projects.
+ process.on('SIGINT', runCleanupAndExit);
+ process.on('SIGTERM', runCleanupAndExit);
+ process.on('exit', runCleanup);
+
+ await fileCache.cacheFiles(tmpFolderPath);
+ }
+
if (jobs > 1) {
const queue = [...projectsToRun];
const runningProjects = new Set();
@@ -225,7 +302,9 @@ async function main() {
[...Array(jobs).keys()].map(async () => {
while (queue.length) {
const project = queue.shift();
- if (!project) break;
+ if (!project) {
+ break;
+ }
// preserve interleaved ordering of writes to stdout/stderr
const chunks: { dest: 'stdout' | 'stderr'; data: string | Buffer }[] = [];
@@ -238,6 +317,7 @@ async function main() {
__filename,
project,
'--skip-pack',
+ '--noCleanup',
`--retry=${args.retry}`,
...(args.live ? ['--live'] : []),
...(args.verbose ? ['--verbose'] : []),
@@ -248,6 +328,7 @@ async function main() {
);
child.stdout?.on('data', (data) => chunks.push({ dest: 'stdout', data }));
child.stderr?.on('data', (data) => chunks.push({ dest: 'stderr', data }));
+
await child;
} catch (error) {
failed.push(project);
@@ -255,7 +336,10 @@ async function main() {
runningProjects.delete(project);
}
- if (IS_CI) console.log(`::group::${failed.includes(project) ? '❌' : '✅'} ${project}`);
+ if (IS_CI) {
+ console.log(`::group::${failed.includes(project) ? '❌' : '✅'} ${project}`);
+ }
+
for (const { data } of chunks) {
process.stdout.write(data);
}
@@ -268,7 +352,7 @@ async function main() {
clearProgress();
} else {
for (const project of projectsToRun) {
- const fn = projects[project];
+ const fn = projectRunners[project];
await withChdir(path.join(rootDir, 'ecosystem-tests', project), async () => {
console.error('\n');
@@ -294,6 +378,10 @@ async function main() {
}
}
+ if (!args.noCleanup) {
+ await runCleanup();
+ }
+
if (failed.length) {
console.error(`${failed.length} project(s) failed - ${failed.join(', ')}`);
process.exit(1);
@@ -340,10 +428,15 @@ async function buildPackage() {
return;
}
- if (!(await pathExists('.pack'))) {
- await fs.mkdir('.pack');
+ if (!(await pathExists(PACK_FOLDER))) {
+ await fs.mkdir(PACK_FOLDER);
}
+ // Run our build script to ensure all of our build artifacts are up to date.
+ // This matters the most for deno as it directly relies on build artifacts
+ // instead of the pack file
+ await run('yarn', ['build']);
+
const proc = await run('npm', ['pack', '--ignore-scripts', '--json'], {
cwd: path.join(process.cwd(), 'dist'),
alwaysPipe: true,
@@ -366,6 +459,11 @@ async function installPackage() {
return;
}
+ try {
+ // Ensure that there is a clean node_modules folder.
+ await run('rm', ['-rf', `./node_modules`]);
+ } catch (err) {}
+
const packFile = getPackFile();
await fs.copyFile(packFile, `./${TAR_NAME}`);
return await run('npm', ['install', '-D', `./${TAR_NAME}`]);
@@ -440,6 +538,80 @@ export const packageDir = async (): Promise => {
throw new Error('Package directory not found');
};
+// Caches files that are modified by this script, e.g. package.json,
+// so that they can be restored when the script either finishes or is
+// terminated
+const fileCache = (() => {
+ const filesToCache: Array = ['package.json', 'package-lock.json', 'deno.lock', 'bun.lockb'];
+
+ return {
+ // Copy existing files from each ecosystem-tests project folder to the ./tmp folder
+ cacheFiles: async (tmpFolderPath: string) => {
+ for (let i = 0; i < projectNames.length; i++) {
+ const projectName = (projectNames as any)[i] as string;
+ const projectPath = path.resolve(process.cwd(), 'ecosystem-tests', projectName);
+
+ for (let j = 0; j < filesToCache.length; j++) {
+ const fileName = filesToCache[j] || '';
+
+ const filePath = path.resolve(projectPath, fileName);
+ if (await fileExists(filePath)) {
+ const tmpProjectPath = path.resolve(tmpFolderPath, projectName);
+
+ if (!(await fileExists(tmpProjectPath))) {
+ await fs.mkdir(tmpProjectPath);
+ }
+ await fs.copyFile(filePath, path.resolve(tmpProjectPath, fileName));
+ }
+ }
+ }
+ },
+
+ // Restore the original files to each ecosystem-tests project folder from the ./tmp folder
+ restoreFiles: async (tmpFolderPath: string) => {
+ for (let i = 0; i < projectNames.length; i++) {
+ const projectName = (projectNames as any)[i] as string;
+
+ const projectPath = path.resolve(process.cwd(), 'ecosystem-tests', projectName);
+ const tmpProjectPath = path.resolve(tmpFolderPath, projectName);
+
+ for (let j = 0; j < filesToCache.length; j++) {
+ const fileName = filesToCache[j] || '';
+
+ const filePath = path.resolve(tmpProjectPath, fileName);
+ if (await fileExists(filePath)) {
+ await fs.rename(filePath, path.resolve(projectPath, fileName));
+ }
+ }
+ await fs.rmdir(tmpProjectPath);
+ }
+ },
+ };
+})();
+
+async function defaultNodeCleanup(projectName: string) {
+ try {
+ const projectPath = path.resolve(process.cwd(), 'ecosystem-tests', projectName);
+
+ const packFilePath = path.resolve(projectPath, TAR_NAME);
+
+ if (await fileExists(packFilePath)) {
+ await fs.unlink(packFilePath);
+ }
+ } catch (err) {
+ console.error('Cleanup failed for project', projectName, err);
+ }
+}
+
+async function fileExists(filePath: string) {
+ try {
+ await fs.stat(filePath);
+ return true;
+ } catch {
+ return false;
+ }
+}
+
main().catch((err) => {
console.error(err);
process.exit(1);
diff --git a/ecosystem-tests/deno/deno.jsonc b/ecosystem-tests/deno/deno.jsonc
index ba78e9d30..7de05f2ba 100644
--- a/ecosystem-tests/deno/deno.jsonc
+++ b/ecosystem-tests/deno/deno.jsonc
@@ -3,5 +3,9 @@
"install": "deno install --node-modules-dir main_test.ts -f",
"check": "deno lint && deno check main_test.ts",
"test": "deno test --allow-env --allow-net --allow-read --node-modules-dir"
+ },
+ "imports": {
+ "openai": "../../deno/mod.ts",
+ "openai/": "../../deno/"
}
}
diff --git a/ecosystem-tests/deno/deno.lock b/ecosystem-tests/deno/deno.lock
index 17a25fcbc..aa22a1427 100644
--- a/ecosystem-tests/deno/deno.lock
+++ b/ecosystem-tests/deno/deno.lock
@@ -1,20 +1,14 @@
{
- "version": "2",
- "remote": {
- "https://deno.land/std@0.192.0/fmt/colors.ts": "d67e3cd9f472535241a8e410d33423980bec45047e343577554d3356e1f0ef4e",
- "https://deno.land/std@0.192.0/testing/_diff.ts": "1a3c044aedf77647d6cac86b798c6417603361b66b54c53331b312caeb447aea",
- "https://deno.land/std@0.192.0/testing/_format.ts": "a69126e8a469009adf4cf2a50af889aca364c349797e63174884a52ff75cf4c7",
- "https://deno.land/std@0.192.0/testing/asserts.ts": "e16d98b4d73ffc4ed498d717307a12500ae4f2cbe668f1a215632d19fcffc22f"
- },
- "npm": {
+ "version": "3",
+ "packages": {
"specifiers": {
- "@types/node@^20.3.1": "@types/node@20.3.1",
- "node-fetch@^3.0.0": "node-fetch@3.3.1",
- "openai": "openai@3.3.0",
- "ts-node@^10.9.1": "ts-node@10.9.1_@types+node@20.3.1_typescript@5.1.3",
- "typescript@^5.1.3": "typescript@5.1.3"
+ "npm:@types/node@^20.3.1": "npm:@types/node@20.3.1",
+ "npm:node-fetch@^3.0.0": "npm:node-fetch@3.3.1",
+ "npm:openai": "npm:openai@3.3.0",
+ "npm:ts-node@^10.9.1": "npm:ts-node@10.9.1_@types+node@20.3.1_typescript@5.1.3",
+ "npm:typescript@^5.1.3": "npm:typescript@5.1.3"
},
- "packages": {
+ "npm": {
"@cspotcode/source-map-support@0.8.1": {
"integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==",
"dependencies": {
@@ -195,5 +189,15 @@
"dependencies": {}
}
}
+ },
+ "redirects": {
+ "https://deno.land/x/fastest_levenshtein/mod.ts": "https://deno.land/x/fastest_levenshtein@1.0.10/mod.ts"
+ },
+ "remote": {
+ "https://deno.land/std@0.192.0/fmt/colors.ts": "d67e3cd9f472535241a8e410d33423980bec45047e343577554d3356e1f0ef4e",
+ "https://deno.land/std@0.192.0/testing/_diff.ts": "1a3c044aedf77647d6cac86b798c6417603361b66b54c53331b312caeb447aea",
+ "https://deno.land/std@0.192.0/testing/_format.ts": "a69126e8a469009adf4cf2a50af889aca364c349797e63174884a52ff75cf4c7",
+ "https://deno.land/std@0.192.0/testing/asserts.ts": "e16d98b4d73ffc4ed498d717307a12500ae4f2cbe668f1a215632d19fcffc22f",
+ "https://deno.land/x/fastest_levenshtein@1.0.10/mod.ts": "aea49d54b6bb37082b2377da2ea068331da07b2a515621d8eff97538b7157b40"
}
}
diff --git a/ecosystem-tests/deno/import_map.json b/ecosystem-tests/deno/import_map.json
deleted file mode 100644
index 941f5396b..000000000
--- a/ecosystem-tests/deno/import_map.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "imports": {
- "/": "./",
- "./": "./"
- }
-}
diff --git a/ecosystem-tests/deno/main_test.ts b/ecosystem-tests/deno/main_test.ts
index b841b4053..b27c9079b 100644
--- a/ecosystem-tests/deno/main_test.ts
+++ b/ecosystem-tests/deno/main_test.ts
@@ -1,7 +1,6 @@
import { assertEquals, AssertionError } from 'https://deno.land/std@0.192.0/testing/asserts.ts';
-import OpenAI, { toFile } from 'npm:openai@3.3.0';
import { distance } from 'https://deno.land/x/fastest_levenshtein/mod.ts';
-import { ChatCompletion } from 'npm:openai@3.3.0/resources/chat/completions';
+import OpenAI, { toFile } from 'openai';
const url = 'https://audio-samples.github.io/samples/mp3/blizzard_biased/sample-1.mp3';
const filename = 'sample-1.mp3';
@@ -66,7 +65,7 @@ Deno.test(async function rawResponse() {
offset += chunk.length;
}
- const json: ChatCompletion = JSON.parse(new TextDecoder().decode(merged));
+ const json: OpenAI.ChatCompletion = JSON.parse(new TextDecoder().decode(merged));
assertSimilar(json.choices[0]?.message.content || '', 'This is a test', 10);
});
From b6acf54baab7e6cbf6ce3ad1d6c70197cc0181d0 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 12 Apr 2024 12:52:04 -0400
Subject: [PATCH 057/533] chore(internal): formatting (#763)
---
.github/workflows/ci.yml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f51c7a308..d6c83025f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -28,3 +28,5 @@ jobs:
- name: Check types
run: |
yarn build
+
+
From a22c6f3e7ffc2367c71cdec106b9803dd26b6397 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Fri, 12 Apr 2024 12:52:29 -0400
Subject: [PATCH 058/533] release: 4.33.1
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 9 +++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index e5b450ff3..bd6b3284c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.33.0"
+ ".": "4.33.1"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f865d94f7..f3067e694 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,14 @@
# Changelog
+## 4.33.1 (2024-04-12)
+
+Full Changelog: [v4.33.0...v4.33.1](https://github.com/openai/openai-node/compare/v4.33.0...v4.33.1)
+
+### Chores
+
+* **internal:** formatting ([#763](https://github.com/openai/openai-node/issues/763)) ([b6acf54](https://github.com/openai/openai-node/commit/b6acf54baab7e6cbf6ce3ad1d6c70197cc0181d0))
+* **internal:** improve ecosystem tests ([#761](https://github.com/openai/openai-node/issues/761)) ([fcf748d](https://github.com/openai/openai-node/commit/fcf748dbbd23f972ff9fd81a8b2a35232a2d6e5c))
+
## 4.33.0 (2024-04-05)
Full Changelog: [v4.32.2...v4.33.0](https://github.com/openai/openai-node/compare/v4.32.2...v4.33.0)
diff --git a/README.md b/README.md
index 62c8967c6..2d1ae6089 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.33.0/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.33.1/mod.ts';
```
diff --git a/build-deno b/build-deno
index bbe96faae..c06cd3bcf 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.33.0/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.33.1/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 490a9e492..998b6a2c7 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.33.0",
+ "version": "4.33.1",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 6726dc21c..0d8f2ffd7 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.33.0'; // x-release-please-version
+export const VERSION = '4.33.1'; // x-release-please-version
From 01f01881c457fa6bebf8ac923941c6628037b9ac Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 15 Apr 2024 15:04:29 -0400
Subject: [PATCH 059/533] feat(api): updates (#766)
---
.stats.yml | 2 +-
api.md | 32 +-
src/resources/beta/assistants/assistants.ts | 27 +-
src/resources/beta/beta.ts | 5 +
src/resources/beta/index.ts | 9 +-
src/resources/beta/threads/index.ts | 29 +-
src/resources/beta/threads/runs/runs.ts | 421 +++++++++++++++++-
src/resources/beta/threads/threads.ts | 323 +++++++++++++-
src/resources/chat/completions.ts | 5 +-
src/resources/fine-tuning/fine-tuning.ts | 5 +-
src/resources/fine-tuning/index.ts | 5 +-
src/resources/fine-tuning/jobs/checkpoints.ts | 108 +++++
src/resources/fine-tuning/jobs/index.ts | 21 +
src/resources/fine-tuning/{ => jobs}/jobs.ts | 135 +++++-
.../beta/assistants/assistants.test.ts | 4 +-
.../beta/threads/runs/runs.test.ts | 7 +-
.../beta/threads/threads.test.ts | 7 +-
tests/api-resources/chat/completions.test.ts | 4 +-
.../fine-tuning/jobs/checkpoints.test.ts | 42 ++
.../fine-tuning/{ => jobs}/jobs.test.ts | 30 ++
20 files changed, 1177 insertions(+), 44 deletions(-)
create mode 100644 src/resources/fine-tuning/jobs/checkpoints.ts
create mode 100644 src/resources/fine-tuning/jobs/index.ts
rename src/resources/fine-tuning/{ => jobs}/jobs.ts (66%)
create mode 100644 tests/api-resources/fine-tuning/jobs/checkpoints.test.ts
rename tests/api-resources/fine-tuning/{ => jobs}/jobs.test.ts (87%)
diff --git a/.stats.yml b/.stats.yml
index c550abf3c..284caebf4 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 51
+configured_endpoints: 52
diff --git a/api.md b/api.md
index 2f82dd17b..c6a2bf273 100644
--- a/api.md
+++ b/api.md
@@ -149,16 +149,29 @@ Methods:
Types:
--
FineTuningJob
-- FineTuningJobEvent
+- FineTuningJob
+- FineTuningJobEvent
+- FineTuningJobIntegration
+- FineTuningJobWandbIntegration
+- FineTuningJobWandbIntegrationObject
Methods:
-- client.fineTuning.jobs.create({ ...params }) -> FineTuningJob
-- client.fineTuning.jobs.retrieve(fineTuningJobId) -> FineTuningJob
-- client.fineTuning.jobs.list({ ...params }) -> FineTuningJobsPage
-- client.fineTuning.jobs.cancel(fineTuningJobId) -> FineTuningJob
-- client.fineTuning.jobs.listEvents(fineTuningJobId, { ...params }) -> FineTuningJobEventsPage
+- client.fineTuning.jobs.create({ ...params }) -> FineTuningJob
+- client.fineTuning.jobs.retrieve(fineTuningJobId) -> FineTuningJob
+- client.fineTuning.jobs.list({ ...params }) -> FineTuningJobsPage
+- client.fineTuning.jobs.cancel(fineTuningJobId) -> FineTuningJob
+- client.fineTuning.jobs.listEvents(fineTuningJobId, { ...params }) -> FineTuningJobEventsPage
+
+### Checkpoints
+
+Types:
+
+- FineTuningJobCheckpoint
+
+Methods:
+
+- client.fineTuning.jobs.checkpoints.list(fineTuningJobId, { ...params }) -> FineTuningJobCheckpointsPage
# Beta
@@ -214,6 +227,11 @@ Methods:
Types:
+- AssistantResponseFormat
+- AssistantResponseFormatOption
+- AssistantToolChoice
+- AssistantToolChoiceFunction
+- AssistantToolChoiceOption
- Thread
- ThreadDeleted
diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants/assistants.ts
index 1e8ca6ee9..fc9afe2ae 100644
--- a/src/resources/beta/assistants/assistants.ts
+++ b/src/resources/beta/assistants/assistants.ts
@@ -113,7 +113,7 @@ export interface Assistant {
file_ids: Array;
/**
- * The system instructions that the assistant uses. The maximum length is 32768
+ * The system instructions that the assistant uses. The maximum length is 256,000
* characters.
*/
instructions: string | null;
@@ -930,7 +930,26 @@ export interface AssistantCreateParams {
* [Model overview](https://platform.openai.com/docs/models/overview) for
* descriptions of them.
*/
- model: string;
+ model:
+ | (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613';
/**
* The description of the assistant. The maximum length is 512 characters.
@@ -945,7 +964,7 @@ export interface AssistantCreateParams {
file_ids?: Array;
/**
- * The system instructions that the assistant uses. The maximum length is 32768
+ * The system instructions that the assistant uses. The maximum length is 256,000
* characters.
*/
instructions?: string | null;
@@ -986,7 +1005,7 @@ export interface AssistantUpdateParams {
file_ids?: Array;
/**
- * The system instructions that the assistant uses. The maximum length is 32768
+ * The system instructions that the assistant uses. The maximum length is 256,000
* characters.
*/
instructions?: string | null;
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index 7d4457319..8f8148f9b 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -30,6 +30,11 @@ export namespace Beta {
export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams;
export import AssistantListParams = AssistantsAPI.AssistantListParams;
export import Threads = ThreadsAPI.Threads;
+ export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat;
+ export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption;
+ export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice;
+ export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction;
+ export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption;
export import Thread = ThreadsAPI.Thread;
export import ThreadDeleted = ThreadsAPI.ThreadDeleted;
export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams;
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
index e43ff7315..54407edb3 100644
--- a/src/resources/beta/index.ts
+++ b/src/resources/beta/index.ts
@@ -18,9 +18,12 @@ export {
AssistantsPage,
Assistants,
} from './assistants/index';
-export { Beta } from './beta';
-export { Chat } from './chat/index';
export {
+ AssistantResponseFormat,
+ AssistantResponseFormatOption,
+ AssistantToolChoice,
+ AssistantToolChoiceFunction,
+ AssistantToolChoiceOption,
Thread,
ThreadDeleted,
ThreadCreateParams,
@@ -32,3 +35,5 @@ export {
ThreadCreateAndRunStreamParams,
Threads,
} from './threads/index';
+export { Beta } from './beta';
+export { Chat } from './chat/index';
diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts
index ac2f9a4fa..5f41766a9 100644
--- a/src/resources/beta/threads/index.ts
+++ b/src/resources/beta/threads/index.ts
@@ -27,6 +27,23 @@ export {
MessagesPage,
Messages,
} from './messages/index';
+export {
+ AssistantResponseFormat,
+ AssistantResponseFormatOption,
+ AssistantToolChoice,
+ AssistantToolChoiceFunction,
+ AssistantToolChoiceOption,
+ Thread,
+ ThreadDeleted,
+ ThreadCreateParams,
+ ThreadUpdateParams,
+ ThreadCreateAndRunParams,
+ ThreadCreateAndRunParamsNonStreaming,
+ ThreadCreateAndRunParamsStreaming,
+ ThreadCreateAndRunPollParams,
+ ThreadCreateAndRunStreamParams,
+ Threads,
+} from './threads';
export {
RequiredActionFunctionToolCall,
Run,
@@ -47,15 +64,3 @@ export {
RunsPage,
Runs,
} from './runs/index';
-export {
- Thread,
- ThreadDeleted,
- ThreadCreateParams,
- ThreadUpdateParams,
- ThreadCreateAndRunParams,
- ThreadCreateAndRunParamsNonStreaming,
- ThreadCreateAndRunParamsStreaming,
- ThreadCreateAndRunPollParams,
- ThreadCreateAndRunStreamParams,
- Threads,
-} from './threads';
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 04234a74f..4cfa6c36e 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -9,6 +9,7 @@ import { sleep } from 'openai/core';
import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
+import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
import * as StepsAPI from 'openai/resources/beta/threads/runs/steps';
import { CursorPage, type CursorPageParams } from 'openai/pagination';
import { Stream } from 'openai/streaming';
@@ -356,6 +357,12 @@ export interface Run {
*/
file_ids: Array;
+ /**
+ * Details on why the run is incomplete. Will be `null` if the run is not
+ * incomplete.
+ */
+ incomplete_details: Run.IncompleteDetails | null;
+
/**
* The instructions that the
* [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
@@ -368,6 +375,18 @@ export interface Run {
*/
last_error: Run.LastError | null;
+ /**
+ * The maximum number of completion tokens specified to have been used over the
+ * course of the run.
+ */
+ max_completion_tokens: number | null;
+
+ /**
+ * The maximum number of prompt tokens specified to have been used over the course
+ * of the run.
+ */
+ max_prompt_tokens: number | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -394,6 +413,24 @@ export interface Run {
*/
required_action: Run.RequiredAction | null;
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format: ThreadsAPI.AssistantResponseFormatOption | null;
+
/**
* The Unix timestamp (in seconds) for when the run was started.
*/
@@ -412,6 +449,16 @@ export interface Run {
*/
thread_id: string;
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+ tool_choice: ThreadsAPI.AssistantToolChoiceOption | null;
+
/**
* The list of tools that the
* [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
@@ -419,6 +466,8 @@ export interface Run {
*/
tools: Array;
+ truncation_strategy: Run.TruncationStrategy | null;
+
/**
* Usage statistics related to the run. This value will be `null` if the run is not
* in a terminal state (i.e. `in_progress`, `queued`, etc.).
@@ -432,6 +481,18 @@ export interface Run {
}
export namespace Run {
+ /**
+ * Details on why the run is incomplete. Will be `null` if the run is not
+ * incomplete.
+ */
+ export interface IncompleteDetails {
+ /**
+ * The reason why the run is incomplete. This will point to which specific token
+ * limit was reached over the course of the run.
+ */
+ reason?: 'max_completion_tokens' | 'max_prompt_tokens';
+ }
+
/**
* The last error associated with this run. Will be `null` if there are no errors.
*/
@@ -475,6 +536,22 @@ export namespace Run {
}
}
+ export interface TruncationStrategy {
+ /**
+ * The truncation strategy to use for the thread. The default is `auto`. If set to
+ * `last_messages`, the thread will be truncated to the n most recent messages in
+ * the thread. When set to `auto`, messages in the middle of the thread will be
+ * dropped to fit the context length of the model, `max_prompt_tokens`.
+ */
+ type: 'auto' | 'last_messages';
+
+ /**
+ * The number of most recent messages from the thread when constructing the context
+ * for the run.
+ */
+ last_messages?: number | null;
+ }
+
/**
* Usage statistics related to the run. This value will be `null` if the run is not
* in a terminal state (i.e. `in_progress`, `queued`, etc.).
@@ -541,6 +618,24 @@ export interface RunCreateParamsBase {
*/
instructions?: string | null;
+ /**
+ * The maximum number of completion tokens that may be used over the course of the
+ * run. The run will make a best effort to use only the number of completion tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * completion tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_completion_tokens?: number | null;
+
+ /**
+ * The maximum number of prompt tokens that may be used over the course of the run.
+ * The run will make a best effort to use only the number of prompt tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * prompt tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_prompt_tokens?: number | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -555,7 +650,45 @@ export interface RunCreateParamsBase {
* model associated with the assistant. If not, the model associated with the
* assistant will be used.
*/
- model?: string | null;
+ model?:
+ | (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613'
+ | null;
+
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
/**
* If `true`, returns a stream of events that happen during the Run as server-sent
@@ -571,11 +704,23 @@ export interface RunCreateParamsBase {
*/
temperature?: number | null;
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+ tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
*/
tools?: Array | null;
+
+ truncation_strategy?: RunCreateParams.TruncationStrategy | null;
}
export namespace RunCreateParams {
@@ -612,6 +757,22 @@ export namespace RunCreateParams {
metadata?: unknown | null;
}
+ export interface TruncationStrategy {
+ /**
+ * The truncation strategy to use for the thread. The default is `auto`. If set to
+ * `last_messages`, the thread will be truncated to the n most recent messages in
+ * the thread. When set to `auto`, messages in the middle of the thread will be
+ * dropped to fit the context length of the model, `max_prompt_tokens`.
+ */
+ type: 'auto' | 'last_messages';
+
+ /**
+ * The number of most recent messages from the thread when constructing the context
+ * for the run.
+ */
+ last_messages?: number | null;
+ }
+
export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
}
@@ -687,6 +848,24 @@ export interface RunCreateAndPollParams {
*/
instructions?: string | null;
+ /**
+ * The maximum number of completion tokens that may be used over the course of the
+ * run. The run will make a best effort to use only the number of completion tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * completion tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_completion_tokens?: number | null;
+
+ /**
+ * The maximum number of prompt tokens that may be used over the course of the run.
+ * The run will make a best effort to use only the number of prompt tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * prompt tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_prompt_tokens?: number | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -701,7 +880,45 @@ export interface RunCreateAndPollParams {
* model associated with the assistant. If not, the model associated with the
* assistant will be used.
*/
- model?: string | null;
+ model?:
+ | (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613'
+ | null;
+
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
@@ -710,11 +927,23 @@ export interface RunCreateAndPollParams {
*/
temperature?: number | null;
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+ tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
*/
tools?: Array | null;
+
+ truncation_strategy?: RunCreateAndPollParams.TruncationStrategy | null;
}
export namespace RunCreateAndPollParams {
@@ -750,6 +979,22 @@ export namespace RunCreateAndPollParams {
*/
metadata?: unknown | null;
}
+
+ export interface TruncationStrategy {
+ /**
+ * The truncation strategy to use for the thread. The default is `auto`. If set to
+ * `last_messages`, the thread will be truncated to the n most recent messages in
+ * the thread. When set to `auto`, messages in the middle of the thread will be
+ * dropped to fit the context length of the model, `max_prompt_tokens`.
+ */
+ type: 'auto' | 'last_messages';
+
+ /**
+ * The number of most recent messages from the thread when constructing the context
+ * for the run.
+ */
+ last_messages?: number | null;
+ }
}
export interface RunCreateAndStreamParams {
@@ -779,6 +1024,24 @@ export interface RunCreateAndStreamParams {
*/
instructions?: string | null;
+ /**
+ * The maximum number of completion tokens that may be used over the course of the
+ * run. The run will make a best effort to use only the number of completion tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * completion tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_completion_tokens?: number | null;
+
+ /**
+ * The maximum number of prompt tokens that may be used over the course of the run.
+ * The run will make a best effort to use only the number of prompt tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * prompt tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_prompt_tokens?: number | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -793,7 +1056,45 @@ export interface RunCreateAndStreamParams {
* model associated with the assistant. If not, the model associated with the
* assistant will be used.
*/
- model?: string | null;
+ model?:
+ | (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613'
+ | null;
+
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
@@ -802,11 +1103,23 @@ export interface RunCreateAndStreamParams {
*/
temperature?: number | null;
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+ tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
*/
tools?: Array | null;
+
+ truncation_strategy?: RunCreateAndStreamParams.TruncationStrategy | null;
}
export namespace RunCreateAndStreamParams {
@@ -842,6 +1155,22 @@ export namespace RunCreateAndStreamParams {
*/
metadata?: unknown | null;
}
+
+ export interface TruncationStrategy {
+ /**
+ * The truncation strategy to use for the thread. The default is `auto`. If set to
+ * `last_messages`, the thread will be truncated to the n most recent messages in
+ * the thread. When set to `auto`, messages in the middle of the thread will be
+ * dropped to fit the context length of the model, `max_prompt_tokens`.
+ */
+ type: 'auto' | 'last_messages';
+
+ /**
+ * The number of most recent messages from the thread when constructing the context
+ * for the run.
+ */
+ last_messages?: number | null;
+ }
}
export interface RunStreamParams {
@@ -871,6 +1200,24 @@ export interface RunStreamParams {
*/
instructions?: string | null;
+ /**
+ * The maximum number of completion tokens that may be used over the course of the
+ * run. The run will make a best effort to use only the number of completion tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * completion tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_completion_tokens?: number | null;
+
+ /**
+ * The maximum number of prompt tokens that may be used over the course of the run.
+ * The run will make a best effort to use only the number of prompt tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * prompt tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_prompt_tokens?: number | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -885,7 +1232,45 @@ export interface RunStreamParams {
* model associated with the assistant. If not, the model associated with the
* assistant will be used.
*/
- model?: string | null;
+ model?:
+ | (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613'
+ | null;
+
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
@@ -894,11 +1279,23 @@ export interface RunStreamParams {
*/
temperature?: number | null;
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+ tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
*/
tools?: Array | null;
+
+ truncation_strategy?: RunStreamParams.TruncationStrategy | null;
}
export namespace RunStreamParams {
@@ -934,6 +1331,22 @@ export namespace RunStreamParams {
*/
metadata?: unknown | null;
}
+
+ export interface TruncationStrategy {
+ /**
+ * The truncation strategy to use for the thread. The default is `auto`. If set to
+ * `last_messages`, the thread will be truncated to the n most recent messages in
+ * the thread. When set to `auto`, messages in the middle of the thread will be
+ * dropped to fit the context length of the model, `max_prompt_tokens`.
+ */
+ type: 'auto' | 'last_messages';
+
+ /**
+ * The number of most recent messages from the thread when constructing the context
+ * for the run.
+ */
+ last_messages?: number | null;
+ }
}
export type RunSubmitToolOutputsParams =
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index 1b4b3f7d5..29682c308 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -116,6 +116,66 @@ export class Threads extends APIResource {
}
}
+/**
+ * An object describing the expected output of the model. If `json_object` only
+ * `function` type `tools` are allowed to be passed to the Run. If `text` the model
+ * can return text or any value needed.
+ */
+export interface AssistantResponseFormat {
+ /**
+ * Must be one of `text` or `json_object`.
+ */
+ type?: 'text' | 'json_object';
+}
+
+/**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+export type AssistantResponseFormatOption = 'none' | 'auto' | AssistantResponseFormat;
+
+/**
+ * Specifies a tool the model should use. Use to force the model to call a specific
+ * tool.
+ */
+export interface AssistantToolChoice {
+ /**
+ * The type of the tool. If type is `function`, the function name must be set
+ */
+ type: 'function' | 'code_interpreter' | 'retrieval';
+
+ function?: AssistantToolChoiceFunction;
+}
+
+export interface AssistantToolChoiceFunction {
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+}
+
+/**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+export type AssistantToolChoiceOption = 'none' | 'auto' | AssistantToolChoice;
+
/**
* Represents a thread that contains
* [messages](https://platform.openai.com/docs/api-reference/messages).
@@ -232,6 +292,24 @@ export interface ThreadCreateAndRunParamsBase {
*/
instructions?: string | null;
+ /**
+ * The maximum number of completion tokens that may be used over the course of the
+ * run. The run will make a best effort to use only the number of completion tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * completion tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_completion_tokens?: number | null;
+
+ /**
+ * The maximum number of prompt tokens that may be used over the course of the run.
+ * The run will make a best effort to use only the number of prompt tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * prompt tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_prompt_tokens?: number | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -246,7 +324,45 @@ export interface ThreadCreateAndRunParamsBase {
* model associated with the assistant. If not, the model associated with the
* assistant will be used.
*/
- model?: string | null;
+ model?:
+ | (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613'
+ | null;
+
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: AssistantResponseFormatOption | null;
/**
* If `true`, returns a stream of events that happen during the Run as server-sent
@@ -267,6 +383,16 @@ export interface ThreadCreateAndRunParamsBase {
*/
thread?: ThreadCreateAndRunParams.Thread;
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+ tool_choice?: AssistantToolChoiceOption | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
@@ -274,6 +400,8 @@ export interface ThreadCreateAndRunParamsBase {
tools?: Array<
AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
> | null;
+
+ truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null;
}
export namespace ThreadCreateAndRunParams {
@@ -331,6 +459,22 @@ export namespace ThreadCreateAndRunParams {
}
}
+ export interface TruncationStrategy {
+ /**
+ * The truncation strategy to use for the thread. The default is `auto`. If set to
+ * `last_messages`, the thread will be truncated to the n most recent messages in
+ * the thread. When set to `auto`, messages in the middle of the thread will be
+ * dropped to fit the context length of the model, `max_prompt_tokens`.
+ */
+ type: 'auto' | 'last_messages';
+
+ /**
+ * The number of most recent messages from the thread when constructing the context
+ * for the run.
+ */
+ last_messages?: number | null;
+ }
+
export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
}
@@ -367,6 +511,24 @@ export interface ThreadCreateAndRunPollParams {
*/
instructions?: string | null;
+ /**
+ * The maximum number of completion tokens that may be used over the course of the
+ * run. The run will make a best effort to use only the number of completion tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * completion tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_completion_tokens?: number | null;
+
+ /**
+ * The maximum number of prompt tokens that may be used over the course of the run.
+ * The run will make a best effort to use only the number of prompt tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * prompt tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_prompt_tokens?: number | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -381,7 +543,45 @@ export interface ThreadCreateAndRunPollParams {
* model associated with the assistant. If not, the model associated with the
* assistant will be used.
*/
- model?: string | null;
+ model?:
+ | (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613'
+ | null;
+
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: AssistantResponseFormatOption | null;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
@@ -395,6 +595,16 @@ export interface ThreadCreateAndRunPollParams {
*/
thread?: ThreadCreateAndRunPollParams.Thread;
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+ tool_choice?: AssistantToolChoiceOption | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
@@ -402,6 +612,8 @@ export interface ThreadCreateAndRunPollParams {
tools?: Array<
AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
> | null;
+
+ truncation_strategy?: ThreadCreateAndRunPollParams.TruncationStrategy | null;
}
export namespace ThreadCreateAndRunPollParams {
@@ -458,6 +670,22 @@ export namespace ThreadCreateAndRunPollParams {
metadata?: unknown | null;
}
}
+
+ export interface TruncationStrategy {
+ /**
+ * The truncation strategy to use for the thread. The default is `auto`. If set to
+ * `last_messages`, the thread will be truncated to the n most recent messages in
+ * the thread. When set to `auto`, messages in the middle of the thread will be
+ * dropped to fit the context length of the model, `max_prompt_tokens`.
+ */
+ type: 'auto' | 'last_messages';
+
+ /**
+ * The number of most recent messages from the thread when constructing the context
+ * for the run.
+ */
+ last_messages?: number | null;
+ }
}
export interface ThreadCreateAndRunStreamParams {
@@ -474,6 +702,24 @@ export interface ThreadCreateAndRunStreamParams {
*/
instructions?: string | null;
+ /**
+ * The maximum number of completion tokens that may be used over the course of the
+ * run. The run will make a best effort to use only the number of completion tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * completion tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_completion_tokens?: number | null;
+
+ /**
+ * The maximum number of prompt tokens that may be used over the course of the run.
+ * The run will make a best effort to use only the number of prompt tokens
+ * specified, across multiple turns of the run. If the run exceeds the number of
+ * prompt tokens specified, the run will end with status `complete`. See
+ * `incomplete_details` for more info.
+ */
+ max_prompt_tokens?: number | null;
+
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful
* for storing additional information about the object in a structured format. Keys
@@ -488,7 +734,45 @@ export interface ThreadCreateAndRunStreamParams {
* model associated with the assistant. If not, the model associated with the
* assistant will be used.
*/
- model?: string | null;
+ model?:
+ | (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613'
+ | null;
+
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: AssistantResponseFormatOption | null;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
@@ -502,6 +786,16 @@ export interface ThreadCreateAndRunStreamParams {
*/
thread?: ThreadCreateAndRunStreamParams.Thread;
+ /**
+ * Controls which (if any) tool is called by the model. `none` means the model will
+ * not call any tools and instead generates a message. `auto` is the default value
+ * and means the model can pick between generating a message or calling a tool.
+ * Specifying a particular tool like `{"type": "TOOL_TYPE"}` or
+ * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+ * call that tool.
+ */
+ tool_choice?: AssistantToolChoiceOption | null;
+
/**
* Override the tools the assistant can use for this run. This is useful for
* modifying the behavior on a per-run basis.
@@ -509,6 +803,8 @@ export interface ThreadCreateAndRunStreamParams {
tools?: Array<
AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
> | null;
+
+ truncation_strategy?: ThreadCreateAndRunStreamParams.TruncationStrategy | null;
}
export namespace ThreadCreateAndRunStreamParams {
@@ -565,9 +861,30 @@ export namespace ThreadCreateAndRunStreamParams {
metadata?: unknown | null;
}
}
+
+ export interface TruncationStrategy {
+ /**
+ * The truncation strategy to use for the thread. The default is `auto`. If set to
+ * `last_messages`, the thread will be truncated to the n most recent messages in
+ * the thread. When set to `auto`, messages in the middle of the thread will be
+ * dropped to fit the context length of the model, `max_prompt_tokens`.
+ */
+ type: 'auto' | 'last_messages';
+
+ /**
+ * The number of most recent messages from the thread when constructing the context
+ * for the run.
+ */
+ last_messages?: number | null;
+ }
}
export namespace Threads {
+ export import AssistantResponseFormat = ThreadsAPI.AssistantResponseFormat;
+ export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption;
+ export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice;
+ export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction;
+ export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption;
export import Thread = ThreadsAPI.Thread;
export import ThreadDeleted = ThreadsAPI.ThreadDeleted;
export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams;
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 8119639f2..2288265ea 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -667,6 +667,8 @@ export interface ChatCompletionCreateParamsBase {
*/
model:
| (string & {})
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
| 'gpt-4-0125-preview'
| 'gpt-4-turbo-preview'
| 'gpt-4-1106-preview'
@@ -730,8 +732,7 @@ export interface ChatCompletionCreateParamsBase {
/**
* Whether to return log probabilities of the output tokens or not. If true,
* returns the log probabilities of each output token returned in the `content` of
- * `message`. This option is currently not available on the `gpt-4-vision-preview`
- * model.
+ * `message`.
*/
logprobs?: boolean | null;
diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts
index e62f8f09c..c8d688b0c 100644
--- a/src/resources/fine-tuning/fine-tuning.ts
+++ b/src/resources/fine-tuning/fine-tuning.ts
@@ -1,7 +1,7 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from 'openai/resource';
-import * as JobsAPI from 'openai/resources/fine-tuning/jobs';
+import * as JobsAPI from 'openai/resources/fine-tuning/jobs/jobs';
export class FineTuning extends APIResource {
jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client);
@@ -11,6 +11,9 @@ export namespace FineTuning {
export import Jobs = JobsAPI.Jobs;
export import FineTuningJob = JobsAPI.FineTuningJob;
export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent;
+ export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration;
+ export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration;
+ export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject;
export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage;
export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage;
export import JobCreateParams = JobsAPI.JobCreateParams;
diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts
index 2885f62f4..1d8739a0a 100644
--- a/src/resources/fine-tuning/index.ts
+++ b/src/resources/fine-tuning/index.ts
@@ -4,10 +4,13 @@ export { FineTuning } from './fine-tuning';
export {
FineTuningJob,
FineTuningJobEvent,
+ FineTuningJobIntegration,
+ FineTuningJobWandbIntegration,
+ FineTuningJobWandbIntegrationObject,
JobCreateParams,
JobListParams,
JobListEventsParams,
FineTuningJobsPage,
FineTuningJobEventsPage,
Jobs,
-} from './jobs';
+} from './jobs/index';
diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts
new file mode 100644
index 000000000..468cb3001
--- /dev/null
+++ b/src/resources/fine-tuning/jobs/checkpoints.ts
@@ -0,0 +1,108 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { isRequestOptions } from 'openai/core';
+import * as CheckpointsAPI from 'openai/resources/fine-tuning/jobs/checkpoints';
+import { CursorPage, type CursorPageParams } from 'openai/pagination';
+
+export class Checkpoints extends APIResource {
+ /**
+ * List checkpoints for a fine-tuning job.
+ */
+ list(
+ fineTuningJobId: string,
+ query?: CheckpointListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ list(
+ fineTuningJobId: string,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise;
+ list(
+ fineTuningJobId: string,
+ query: CheckpointListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise {
+ if (isRequestOptions(query)) {
+ return this.list(fineTuningJobId, {}, query);
+ }
+ return this._client.getAPIList(
+ `/fine_tuning/jobs/${fineTuningJobId}/checkpoints`,
+ FineTuningJobCheckpointsPage,
+ { query, ...options },
+ );
+ }
+}
+
+export class FineTuningJobCheckpointsPage extends CursorPage {}
+
+/**
+ * The `fine_tuning.job.checkpoint` object represents a model checkpoint for a
+ * fine-tuning job that is ready to use.
+ */
+export interface FineTuningJobCheckpoint {
+ /**
+ * The checkpoint identifier, which can be referenced in the API endpoints.
+ */
+ id: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the checkpoint was created.
+ */
+ created_at: number;
+
+ /**
+ * The name of the fine-tuned checkpoint model that is created.
+ */
+ fine_tuned_model_checkpoint: string;
+
+ /**
+ * The name of the fine-tuning job that this checkpoint was created from.
+ */
+ fine_tuning_job_id: string;
+
+ /**
+ * Metrics at the step number during the fine-tuning job.
+ */
+ metrics: FineTuningJobCheckpoint.Metrics;
+
+ /**
+ * The object type, which is always "fine_tuning.job.checkpoint".
+ */
+ object: 'fine_tuning.job.checkpoint';
+
+ /**
+ * The step number that the checkpoint was created at.
+ */
+ step_number: number;
+}
+
+export namespace FineTuningJobCheckpoint {
+ /**
+ * Metrics at the step number during the fine-tuning job.
+ */
+ export interface Metrics {
+ full_valid_loss?: number;
+
+ full_valid_mean_token_accuracy?: number;
+
+ step?: number;
+
+ train_loss?: number;
+
+ train_mean_token_accuracy?: number;
+
+ valid_loss?: number;
+
+ valid_mean_token_accuracy?: number;
+ }
+}
+
+export interface CheckpointListParams extends CursorPageParams {}
+
+export namespace Checkpoints {
+ export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint;
+ export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage;
+ export import CheckpointListParams = CheckpointsAPI.CheckpointListParams;
+}
diff --git a/src/resources/fine-tuning/jobs/index.ts b/src/resources/fine-tuning/jobs/index.ts
new file mode 100644
index 000000000..275c776e9
--- /dev/null
+++ b/src/resources/fine-tuning/jobs/index.ts
@@ -0,0 +1,21 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ FineTuningJob,
+ FineTuningJobEvent,
+ FineTuningJobIntegration,
+ FineTuningJobWandbIntegration,
+ FineTuningJobWandbIntegrationObject,
+ JobCreateParams,
+ JobListParams,
+ JobListEventsParams,
+ FineTuningJobsPage,
+ FineTuningJobEventsPage,
+ Jobs,
+} from './jobs';
+export {
+ FineTuningJobCheckpoint,
+ CheckpointListParams,
+ FineTuningJobCheckpointsPage,
+ Checkpoints,
+} from './checkpoints';
diff --git a/src/resources/fine-tuning/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts
similarity index 66%
rename from src/resources/fine-tuning/jobs.ts
rename to src/resources/fine-tuning/jobs/jobs.ts
index eb77405ca..10b3d38d2 100644
--- a/src/resources/fine-tuning/jobs.ts
+++ b/src/resources/fine-tuning/jobs/jobs.ts
@@ -3,10 +3,13 @@
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
-import * as JobsAPI from 'openai/resources/fine-tuning/jobs';
+import * as JobsAPI from 'openai/resources/fine-tuning/jobs/jobs';
+import * as CheckpointsAPI from 'openai/resources/fine-tuning/jobs/checkpoints';
import { CursorPage, type CursorPageParams } from 'openai/pagination';
export class Jobs extends APIResource {
+ checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client);
+
/**
* Creates a fine-tuning job which begins the process of creating a new model from
* a given dataset.
@@ -147,6 +150,11 @@ export interface FineTuningJob {
*/
result_files: Array;
+ /**
+ * The seed used for the fine-tuning job.
+ */
+ seed: number;
+
/**
* The current status of the fine-tuning job, which can be either
* `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
@@ -171,6 +179,11 @@ export interface FineTuningJob {
* [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
*/
validation_file: string | null;
+
+ /**
+ * A list of integrations to enable for this fine-tuning job.
+ */
+ integrations?: Array | null;
}
export namespace FineTuningJob {
@@ -227,6 +240,56 @@ export interface FineTuningJobEvent {
object: 'fine_tuning.job.event';
}
+export type FineTuningJobIntegration = FineTuningJobWandbIntegrationObject;
+
+/**
+ * The settings for your integration with Weights and Biases. This payload
+ * specifies the project that metrics will be sent to. Optionally, you can set an
+ * explicit display name for your run, add tags to your run, and set a default
+ * entity (team, username, etc) to be associated with your run.
+ */
+export interface FineTuningJobWandbIntegration {
+ /**
+ * The name of the project that the new run will be created under.
+ */
+ project: string;
+
+ /**
+ * The entity to use for the run. This allows you to set the team or username of
+ * the WandB user that you would like associated with the run. If not set, the
+ * default entity for the registered WandB API key is used.
+ */
+ entity?: string | null;
+
+ /**
+ * A display name to set for the run. If not set, we will use the Job ID as the
+ * name.
+ */
+ name?: string | null;
+
+ /**
+ * A list of tags to be attached to the newly created run. These tags are passed
+ * through directly to WandB. Some default tags are generated by OpenAI:
+ * "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
+ */
+ tags?: Array;
+}
+
+export interface FineTuningJobWandbIntegrationObject {
+ /**
+ * The type of the integration being enabled for the fine-tuning job
+ */
+ type: 'wandb';
+
+ /**
+ * The settings for your integration with Weights and Biases. This payload
+ * specifies the project that metrics will be sent to. Optionally, you can set an
+ * explicit display name for your run, add tags to your run, and set a default
+ * entity (team, username, etc) to be associated with your run.
+ */
+ wandb: FineTuningJobWandbIntegration;
+}
+
export interface JobCreateParams {
/**
* The name of the model to fine-tune. You can select one of the
@@ -253,6 +316,18 @@ export interface JobCreateParams {
*/
hyperparameters?: JobCreateParams.Hyperparameters;
+ /**
+ * A list of integrations to enable for your fine-tuning job.
+ */
+ integrations?: Array | null;
+
+ /**
+ * The seed controls the reproducibility of the job. Passing in the same seed and
+ * job parameters should produce the same results, but may differ in rare cases. If
+ * a seed is not specified, one will be generated for you.
+ */
+ seed?: number | null;
+
/**
* A string of up to 18 characters that will be added to your fine-tuned model
* name.
@@ -302,6 +377,57 @@ export namespace JobCreateParams {
*/
n_epochs?: 'auto' | number;
}
+
+ export interface Integration {
+ /**
+ * The type of integration to enable. Currently, only "wandb" (Weights and Biases)
+ * is supported.
+ */
+ type: 'wandb';
+
+ /**
+ * The settings for your integration with Weights and Biases. This payload
+ * specifies the project that metrics will be sent to. Optionally, you can set an
+ * explicit display name for your run, add tags to your run, and set a default
+ * entity (team, username, etc) to be associated with your run.
+ */
+ wandb: Integration.Wandb;
+ }
+
+ export namespace Integration {
+ /**
+ * The settings for your integration with Weights and Biases. This payload
+ * specifies the project that metrics will be sent to. Optionally, you can set an
+ * explicit display name for your run, add tags to your run, and set a default
+ * entity (team, username, etc) to be associated with your run.
+ */
+ export interface Wandb {
+ /**
+ * The name of the project that the new run will be created under.
+ */
+ project: string;
+
+ /**
+ * The entity to use for the run. This allows you to set the team or username of
+ * the WandB user that you would like associated with the run. If not set, the
+ * default entity for the registered WandB API key is used.
+ */
+ entity?: string | null;
+
+ /**
+ * A display name to set for the run. If not set, we will use the Job ID as the
+ * name.
+ */
+ name?: string | null;
+
+ /**
+ * A list of tags to be attached to the newly created run. These tags are passed
+ * through directly to WandB. Some default tags are generated by OpenAI:
+ * "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
+ */
+ tags?: Array;
+ }
+ }
}
export interface JobListParams extends CursorPageParams {}
@@ -311,9 +437,16 @@ export interface JobListEventsParams extends CursorPageParams {}
export namespace Jobs {
export import FineTuningJob = JobsAPI.FineTuningJob;
export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent;
+ export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration;
+ export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration;
+ export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject;
export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage;
export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage;
export import JobCreateParams = JobsAPI.JobCreateParams;
export import JobListParams = JobsAPI.JobListParams;
export import JobListEventsParams = JobsAPI.JobListEventsParams;
+ export import Checkpoints = CheckpointsAPI.Checkpoints;
+ export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint;
+ export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage;
+ export import CheckpointListParams = CheckpointsAPI.CheckpointListParams;
}
diff --git a/tests/api-resources/beta/assistants/assistants.test.ts b/tests/api-resources/beta/assistants/assistants.test.ts
index b11075d06..62282148d 100644
--- a/tests/api-resources/beta/assistants/assistants.test.ts
+++ b/tests/api-resources/beta/assistants/assistants.test.ts
@@ -10,7 +10,7 @@ const openai = new OpenAI({
describe('resource assistants', () => {
test('create: only required params', async () => {
- const responsePromise = openai.beta.assistants.create({ model: 'string' });
+ const responsePromise = openai.beta.assistants.create({ model: 'gpt-4-turbo' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -22,7 +22,7 @@ describe('resource assistants', () => {
test('create: required and optional params', async () => {
const response = await openai.beta.assistants.create({
- model: 'string',
+ model: 'gpt-4-turbo',
description: 'string',
file_ids: ['string', 'string', 'string'],
instructions: 'string',
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
index 2911cfd53..2489d56e2 100644
--- a/tests/api-resources/beta/threads/runs/runs.test.ts
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -30,11 +30,16 @@ describe('resource runs', () => {
{ role: 'user', content: 'x', file_ids: ['string'], metadata: {} },
],
instructions: 'string',
+ max_completion_tokens: 256,
+ max_prompt_tokens: 256,
metadata: {},
- model: 'string',
+ model: 'gpt-4-turbo',
+ response_format: 'none',
stream: false,
temperature: 1,
+ tool_choice: 'none',
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
+ truncation_strategy: { type: 'auto', last_messages: 1 },
});
});
diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts
index 3606019bd..028a150f4 100644
--- a/tests/api-resources/beta/threads/threads.test.ts
+++ b/tests/api-resources/beta/threads/threads.test.ts
@@ -106,8 +106,11 @@ describe('resource threads', () => {
const response = await openai.beta.threads.createAndRun({
assistant_id: 'string',
instructions: 'string',
+ max_completion_tokens: 256,
+ max_prompt_tokens: 256,
metadata: {},
- model: 'string',
+ model: 'gpt-4-turbo',
+ response_format: 'none',
stream: false,
temperature: 1,
thread: {
@@ -118,7 +121,9 @@ describe('resource threads', () => {
],
metadata: {},
},
+ tool_choice: 'none',
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
+ truncation_strategy: { type: 'auto', last_messages: 1 },
});
});
});
diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts
index e0ccb3910..bd398b91d 100644
--- a/tests/api-resources/chat/completions.test.ts
+++ b/tests/api-resources/chat/completions.test.ts
@@ -12,7 +12,7 @@ describe('resource completions', () => {
test('create: only required params', async () => {
const responsePromise = openai.chat.completions.create({
messages: [{ content: 'string', role: 'system' }],
- model: 'gpt-3.5-turbo',
+ model: 'gpt-4-turbo',
});
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
@@ -26,7 +26,7 @@ describe('resource completions', () => {
test('create: required and optional params', async () => {
const response = await openai.chat.completions.create({
messages: [{ content: 'string', role: 'system', name: 'string' }],
- model: 'gpt-3.5-turbo',
+ model: 'gpt-4-turbo',
frequency_penalty: -2,
function_call: 'none',
functions: [{ description: 'string', name: 'string', parameters: { foo: 'bar' } }],
diff --git a/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts b/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts
new file mode 100644
index 000000000..1844d7c87
--- /dev/null
+++ b/tests/api-resources/fine-tuning/jobs/checkpoints.test.ts
@@ -0,0 +1,42 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/',
+});
+
+describe('resource checkpoints', () => {
+ test('list', async () => {
+ const responsePromise = openai.fineTuning.jobs.checkpoints.list('ft-AF1WoRqd3aJAHsqc9NY7iL8F');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.fineTuning.jobs.checkpoints.list('ft-AF1WoRqd3aJAHsqc9NY7iL8F', {
+ path: '/_stainless_unknown_path',
+ }),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(
+ openai.fineTuning.jobs.checkpoints.list(
+ 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
+ { after: 'string', limit: 0 },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(OpenAI.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/fine-tuning/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts
similarity index 87%
rename from tests/api-resources/fine-tuning/jobs.test.ts
rename to tests/api-resources/fine-tuning/jobs/jobs.test.ts
index d8f230abd..d2207cd97 100644
--- a/tests/api-resources/fine-tuning/jobs.test.ts
+++ b/tests/api-resources/fine-tuning/jobs/jobs.test.ts
@@ -28,6 +28,36 @@ describe('resource jobs', () => {
model: 'gpt-3.5-turbo',
training_file: 'file-abc123',
hyperparameters: { batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' },
+ integrations: [
+ {
+ type: 'wandb',
+ wandb: {
+ project: 'my-wandb-project',
+ name: 'string',
+ entity: 'string',
+ tags: ['custom-tag', 'custom-tag', 'custom-tag'],
+ },
+ },
+ {
+ type: 'wandb',
+ wandb: {
+ project: 'my-wandb-project',
+ name: 'string',
+ entity: 'string',
+ tags: ['custom-tag', 'custom-tag', 'custom-tag'],
+ },
+ },
+ {
+ type: 'wandb',
+ wandb: {
+ project: 'my-wandb-project',
+ name: 'string',
+ entity: 'string',
+ tags: ['custom-tag', 'custom-tag', 'custom-tag'],
+ },
+ },
+ ],
+ seed: 42,
suffix: 'x',
validation_file: 'file-abc123',
});
From 0c75bbdb2022a5acf4e4b5e2997854f7784e46b7 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 15 Apr 2024 16:09:44 -0400
Subject: [PATCH 060/533] feat(api): add batch API (#768)
https://platform.openai.com/docs/api-reference/batch/create
---
.stats.yml | 2 +-
api.md | 14 ++
src/index.ts | 7 +
src/resources/batches.ts | 225 ++++++++++++++++++++++++++++
src/resources/index.ts | 1 +
tests/api-resources/batches.test.ts | 71 +++++++++
6 files changed, 319 insertions(+), 1 deletion(-)
create mode 100644 src/resources/batches.ts
create mode 100644 tests/api-resources/batches.test.ts
diff --git a/.stats.yml b/.stats.yml
index 284caebf4..47c2bce1c 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 52
+configured_endpoints: 55
diff --git a/api.md b/api.md
index c6a2bf273..02030dc07 100644
--- a/api.md
+++ b/api.md
@@ -337,3 +337,17 @@ Methods:
- client.beta.threads.messages.files.retrieve(threadId, messageId, fileId) -> MessageFile
- client.beta.threads.messages.files.list(threadId, messageId, { ...params }) -> MessageFilesPage
+
+# Batches
+
+Types:
+
+- Batch
+- BatchError
+- BatchRequestCounts
+
+Methods:
+
+- client.batches.create({ ...params }) -> Batch
+- client.batches.retrieve(batchId) -> Batch
+- client.batches.cancel(batchId) -> Batch
diff --git a/src/index.ts b/src/index.ts
index 9a2b2eaad..84fdd3979 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -150,6 +150,7 @@ export class OpenAI extends Core.APIClient {
models: API.Models = new API.Models(this);
fineTuning: API.FineTuning = new API.FineTuning(this);
beta: API.Beta = new API.Beta(this);
+ batches: API.Batches = new API.Batches(this);
protected override defaultQuery(): Core.DefaultQuery | undefined {
return this._options.defaultQuery;
@@ -285,6 +286,12 @@ export namespace OpenAI {
export import Beta = API.Beta;
+ export import Batches = API.Batches;
+ export import Batch = API.Batch;
+ export import BatchError = API.BatchError;
+ export import BatchRequestCounts = API.BatchRequestCounts;
+ export import BatchCreateParams = API.BatchCreateParams;
+
export import ErrorObject = API.ErrorObject;
export import FunctionDefinition = API.FunctionDefinition;
export import FunctionParameters = API.FunctionParameters;
diff --git a/src/resources/batches.ts b/src/resources/batches.ts
new file mode 100644
index 000000000..75b491a16
--- /dev/null
+++ b/src/resources/batches.ts
@@ -0,0 +1,225 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import * as BatchesAPI from 'openai/resources/batches';
+
+export class Batches extends APIResource {
+ /**
+ * Creates and executes a batch from an uploaded file of requests
+ */
+ create(body: BatchCreateParams, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.post('/batches', { body, ...options });
+ }
+
+ /**
+ * Retrieves a batch.
+ */
+ retrieve(batchId: string, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.get(`/batches/${batchId}`, options);
+ }
+
+ /**
+ * Cancels an in-progress batch.
+ */
+ cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise {
+ return this._client.post(`/batches/${batchId}/cancel`, options);
+ }
+}
+
+export interface Batch {
+ id: string;
+
+ /**
+ * The time frame within which the batch should be processed.
+ */
+ completion_window: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch was created.
+ */
+ created_at: string;
+
+ /**
+ * The OpenAI API endpoint used by the batch.
+ */
+ endpoint: string;
+
+ /**
+ * The ID of the input file for the batch.
+ */
+ input_file_id: string;
+
+ /**
+ * The object type, which is always `batch`.
+ */
+ object: 'batch';
+
+ /**
+ * The current status of the batch.
+ */
+ status:
+ | 'validating'
+ | 'failed'
+ | 'in_progress'
+ | 'finalizing'
+ | 'completed'
+ | 'expired'
+ | 'cancelling'
+ | 'cancelled';
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch was cancelled.
+ */
+ cancelled_at?: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch started cancelling.
+ */
+ cancelling_at?: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch was completed.
+ */
+ completed_at?: string;
+
+ /**
+ * The ID of the file containing the outputs of requests with errors.
+ */
+ error_file_id?: string;
+
+ errors?: Batch.Errors;
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch expired.
+ */
+ expired_at?: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch will expire.
+ */
+ expires_at?: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch failed.
+ */
+ failed_at?: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch started finalizing.
+ */
+ finalizing_at?: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when the batch started processing.
+ */
+ in_progress_at?: string;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maxium of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the file containing the outputs of successfully executed requests.
+ */
+ output_file_id?: string;
+
+ /**
+ * The request counts for different statuses within the batch.
+ */
+ request_counts?: BatchRequestCounts;
+}
+
+export namespace Batch {
+ export interface Errors {
+ data?: Array;
+
+ /**
+ * The object type, which is always `list`.
+ */
+ object?: string;
+ }
+}
+
+export interface BatchError {
+ /**
+ * An error code identifying the error type.
+ */
+ code?: string;
+
+ /**
+ * The line number of the input file where the error occurred, if applicable.
+ */
+ line?: number | null;
+
+ /**
+ * A human-readable message providing more details about the error.
+ */
+ message?: string;
+
+ /**
+ * The name of the parameter that caused the error, if applicable.
+ */
+ param?: string | null;
+}
+
+/**
+ * The request counts for different statuses within the batch.
+ */
+export interface BatchRequestCounts {
+ /**
+ * Number of requests that have been completed successfully.
+ */
+ completed: number;
+
+ /**
+ * Number of requests that have failed.
+ */
+ failed: number;
+
+ /**
+ * Total number of requests in the batch.
+ */
+ total: number;
+}
+
+export interface BatchCreateParams {
+ /**
+ * The time frame within which the batch should be processed. Currently only `24h`
+ * is supported.
+ */
+ completion_window: '24h';
+
+ /**
+ * The endpoint to be used for all requests in the batch. Currently only
+ * `/v1/chat/completions` is supported.
+ */
+ endpoint: '/v1/chat/completions';
+
+ /**
+ * The ID of an uploaded file that contains requests for the new batch.
+ *
+ * See [upload file](https://platform.openai.com/docs/api-reference/files/create)
+ * for how to upload a file.
+ *
+ * Your input file must be formatted as a JSONL file, and must be uploaded with the
+ * purpose `batch`.
+ */
+ input_file_id: string;
+
+ /**
+ * Optional custom metadata for the batch.
+ */
+ metadata?: Record | null;
+}
+
+export namespace Batches {
+ export import Batch = BatchesAPI.Batch;
+ export import BatchError = BatchesAPI.BatchError;
+ export import BatchRequestCounts = BatchesAPI.BatchRequestCounts;
+ export import BatchCreateParams = BatchesAPI.BatchCreateParams;
+}
diff --git a/src/resources/index.ts b/src/resources/index.ts
index a9741f5fd..282e57ea1 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -3,6 +3,7 @@
export * from './chat/index';
export * from './shared';
export { Audio } from './audio/audio';
+export { Batch, BatchError, BatchRequestCounts, BatchCreateParams, Batches } from './batches';
export { Beta } from './beta/beta';
export {
Completion,
diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts
new file mode 100644
index 000000000..e4a9015d1
--- /dev/null
+++ b/tests/api-resources/batches.test.ts
@@ -0,0 +1,71 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import OpenAI from 'openai';
+import { Response } from 'node-fetch';
+
+const openai = new OpenAI({
+ apiKey: 'My API Key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/',
+});
+
+describe('resource batches', () => {
+ test('create: only required params', async () => {
+ const responsePromise = openai.batches.create({
+ completion_window: '24h',
+ endpoint: '/v1/chat/completions',
+ input_file_id: 'string',
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await openai.batches.create({
+ completion_window: '24h',
+ endpoint: '/v1/chat/completions',
+ input_file_id: 'string',
+ metadata: { foo: 'string' },
+ });
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = openai.batches.retrieve('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(openai.batches.retrieve('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ OpenAI.NotFoundError,
+ );
+ });
+
+ test('cancel', async () => {
+ const responsePromise = openai.batches.cancel('string');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('cancel: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+ await expect(openai.batches.cancel('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ OpenAI.NotFoundError,
+ );
+ });
+});
From 56f4821ac2f86e60231c31c1e007d68176100c7c Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 15 Apr 2024 16:10:04 -0400
Subject: [PATCH 061/533] release: 4.34.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 9 +++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index bd6b3284c..80372a7f2 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.33.1"
+ ".": "4.34.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f3067e694..4a253da33 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,14 @@
# Changelog
+## 4.34.0 (2024-04-15)
+
+Full Changelog: [v4.33.1...v4.34.0](https://github.com/openai/openai-node/compare/v4.33.1...v4.34.0)
+
+### Features
+
+* **api:** add batch API ([#768](https://github.com/openai/openai-node/issues/768)) ([7fe34f2](https://github.com/openai/openai-node/commit/7fe34f2d0bda9c1cb116a593f02bd0cc15a52e12))
+* **api:** updates ([#766](https://github.com/openai/openai-node/issues/766)) ([52bcc47](https://github.com/openai/openai-node/commit/52bcc47043e4c3ffe15ae9e7ac0fa87e2493aad9))
+
## 4.33.1 (2024-04-12)
Full Changelog: [v4.33.0...v4.33.1](https://github.com/openai/openai-node/compare/v4.33.0...v4.33.1)
diff --git a/README.md b/README.md
index 2d1ae6089..774fd6b76 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.33.1/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.34.0/mod.ts';
```
diff --git a/build-deno b/build-deno
index c06cd3bcf..d9e64064c 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.33.1/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.34.0/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index 998b6a2c7..a821e7f72 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.33.1",
+ "version": "4.34.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 0d8f2ffd7..3577d3d22 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.33.1'; // x-release-please-version
+export const VERSION = '4.34.0'; // x-release-please-version
From 8cdd7ea9a28455c84f2babaea998fde228287146 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 15 Apr 2024 17:33:50 -0400
Subject: [PATCH 062/533] feat(errors): add request_id property (#769)
---
src/error.ts | 3 +++
1 file changed, 3 insertions(+)
diff --git a/src/error.ts b/src/error.ts
index deac34c5d..19a60598a 100644
--- a/src/error.ts
+++ b/src/error.ts
@@ -13,6 +13,8 @@ export class APIError extends OpenAIError {
readonly param: string | null | undefined;
readonly type: string | undefined;
+ readonly request_id: string | null | undefined;
+
constructor(
status: number | undefined,
error: Object | undefined,
@@ -22,6 +24,7 @@ export class APIError extends OpenAIError {
super(`${APIError.makeMessage(status, error, message)}`);
this.status = status;
this.headers = headers;
+ this.request_id = headers?.['x-request-id'];
const data = error as Record;
this.error = data;
From 116e38aae33a2d7b88c27d783a95b41e56500600 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 15 Apr 2024 17:34:10 -0400
Subject: [PATCH 063/533] release: 4.35.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 8 ++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 80372a7f2..c63d8fd43 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.34.0"
+ ".": "4.35.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4a253da33..48a52d258 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 4.35.0 (2024-04-15)
+
+Full Changelog: [v4.34.0...v4.35.0](https://github.com/openai/openai-node/compare/v4.34.0...v4.35.0)
+
+### Features
+
+* **errors:** add request_id property ([#769](https://github.com/openai/openai-node/issues/769)) ([43aa6a1](https://github.com/openai/openai-node/commit/43aa6a19cfb1448903dfaddc4da3def2eda9cbab))
+
## 4.34.0 (2024-04-15)
Full Changelog: [v4.33.1...v4.34.0](https://github.com/openai/openai-node/compare/v4.33.1...v4.34.0)
diff --git a/README.md b/README.md
index 774fd6b76..c32fcfcd9 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.34.0/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.35.0/mod.ts';
```
diff --git a/build-deno b/build-deno
index d9e64064c..ffdb2cb9d 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.34.0/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.35.0/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index a821e7f72..d57fe15cd 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.34.0",
+ "version": "4.35.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 3577d3d22..7ca672a0d 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.34.0'; // x-release-please-version
+export const VERSION = '4.35.0'; // x-release-please-version
From 7fa4400668977e4265bd26591a4712546e54892f Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 16 Apr 2024 09:17:37 -0400
Subject: [PATCH 064/533] feat(client): add header OpenAI-Project (#772)
---
src/index.ts | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/src/index.ts b/src/index.ts
index 84fdd3979..91267cfc0 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -18,6 +18,11 @@ export interface ClientOptions {
*/
organization?: string | null | undefined;
+ /**
+ * Defaults to process.env['OPENAI_PROJECT_ID'].
+ */
+ project?: string | null | undefined;
+
/**
* Override the default base URL for the API, e.g., "https://api.example.com/v2/"
*
@@ -85,6 +90,7 @@ export interface ClientOptions {
export class OpenAI extends Core.APIClient {
apiKey: string;
organization: string | null;
+ project: string | null;
private _options: ClientOptions;
@@ -93,6 +99,7 @@ export class OpenAI extends Core.APIClient {
*
* @param {string | undefined} [opts.apiKey=process.env['OPENAI_API_KEY'] ?? undefined]
* @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null]
+ * @param {string | null | undefined} [opts.project=process.env['OPENAI_PROJECT_ID'] ?? null]
* @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL'] ?? https://api.openai.com/v1] - Override the default base URL for the API.
* @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
* @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
@@ -106,6 +113,7 @@ export class OpenAI extends Core.APIClient {
baseURL = Core.readEnv('OPENAI_BASE_URL'),
apiKey = Core.readEnv('OPENAI_API_KEY'),
organization = Core.readEnv('OPENAI_ORG_ID') ?? null,
+ project = Core.readEnv('OPENAI_PROJECT_ID') ?? null,
...opts
}: ClientOptions = {}) {
if (apiKey === undefined) {
@@ -117,6 +125,7 @@ export class OpenAI extends Core.APIClient {
const options: ClientOptions = {
apiKey,
organization,
+ project,
...opts,
baseURL: baseURL || `https://api.openai.com/v1`,
};
@@ -138,6 +147,7 @@ export class OpenAI extends Core.APIClient {
this.apiKey = apiKey;
this.organization = organization;
+ this.project = project;
}
completions: API.Completions = new API.Completions(this);
@@ -160,6 +170,7 @@ export class OpenAI extends Core.APIClient {
return {
...super.defaultHeaders(opts),
'OpenAI-Organization': this.organization,
+ 'OpenAI-Project': this.project,
...this._options.defaultHeaders,
};
}
From be0dd9f7b5341adbb32ae3f55853405d6c4039f0 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 16 Apr 2024 11:37:59 -0400
Subject: [PATCH 065/533] build: configure UTF-8 locale in devcontainer (#774)
---
.devcontainer/Dockerfile | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index d03365a2b..8ea34be96 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -7,6 +7,10 @@ RUN apt-get update && apt-get install -y \
yarnpkg \
&& apt-get clean autoclean
+# Ensure UTF-8 encoding
+ENV LANG=C.UTF-8
+ENV LC_ALL=C.UTF-8
+
# Yarn
RUN ln -sf /usr/bin/yarnpkg /usr/bin/yarn
From 9dfc744169262d1a57a7be0453780dd77a726b6f Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 16 Apr 2024 12:54:04 -0400
Subject: [PATCH 066/533] feat: extract chat models to a named enum (#775)
---
api.md | 4 ++++
src/index.ts | 1 +
src/resources/chat/chat.ts | 23 +++++++++++++++++++++++
src/resources/chat/completions.ts | 23 ++---------------------
src/resources/chat/index.ts | 2 +-
5 files changed, 31 insertions(+), 22 deletions(-)
diff --git a/api.md b/api.md
index 02030dc07..7557ce133 100644
--- a/api.md
+++ b/api.md
@@ -20,6 +20,10 @@ Methods:
# Chat
+Types:
+
+- ChatModel
+
## Completions
Types:
diff --git a/src/index.ts b/src/index.ts
index 91267cfc0..7a776b2c1 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -238,6 +238,7 @@ export namespace OpenAI {
export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming;
export import Chat = API.Chat;
+ export import ChatModel = API.ChatModel;
export import ChatCompletion = API.ChatCompletion;
export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam;
export import ChatCompletionChunk = API.ChatCompletionChunk;
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index 6c7bccb22..fa681ed64 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -1,13 +1,36 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from 'openai/resource';
+import * as ChatAPI from 'openai/resources/chat/chat';
import * as CompletionsAPI from 'openai/resources/chat/completions';
export class Chat extends APIResource {
completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client);
}
+export type ChatModel =
+ | 'gpt-4-turbo'
+ | 'gpt-4-turbo-2024-04-09'
+ | 'gpt-4-0125-preview'
+ | 'gpt-4-turbo-preview'
+ | 'gpt-4-1106-preview'
+ | 'gpt-4-vision-preview'
+ | 'gpt-4'
+ | 'gpt-4-0314'
+ | 'gpt-4-0613'
+ | 'gpt-4-32k'
+ | 'gpt-4-32k-0314'
+ | 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo'
+ | 'gpt-3.5-turbo-16k'
+ | 'gpt-3.5-turbo-0301'
+ | 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
+ | 'gpt-3.5-turbo-0125'
+ | 'gpt-3.5-turbo-16k-0613';
+
export namespace Chat {
+ export import ChatModel = ChatAPI.ChatModel;
export import Completions = CompletionsAPI.Completions;
export import ChatCompletion = CompletionsAPI.ChatCompletion;
export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam;
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 2288265ea..b9672f52b 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -6,6 +6,7 @@ import { APIResource } from 'openai/resource';
import * as ChatCompletionsAPI from 'openai/resources/chat/completions';
import * as CompletionsAPI from 'openai/resources/completions';
import * as Shared from 'openai/resources/shared';
+import * as ChatAPI from 'openai/resources/chat/chat';
import { Stream } from 'openai/streaming';
export class Completions extends APIResource {
@@ -665,27 +666,7 @@ export interface ChatCompletionCreateParamsBase {
* [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
* table for details on which models work with the Chat API.
*/
- model:
- | (string & {})
- | 'gpt-4-turbo'
- | 'gpt-4-turbo-2024-04-09'
- | 'gpt-4-0125-preview'
- | 'gpt-4-turbo-preview'
- | 'gpt-4-1106-preview'
- | 'gpt-4-vision-preview'
- | 'gpt-4'
- | 'gpt-4-0314'
- | 'gpt-4-0613'
- | 'gpt-4-32k'
- | 'gpt-4-32k-0314'
- | 'gpt-4-32k-0613'
- | 'gpt-3.5-turbo'
- | 'gpt-3.5-turbo-16k'
- | 'gpt-3.5-turbo-0301'
- | 'gpt-3.5-turbo-0613'
- | 'gpt-3.5-turbo-1106'
- | 'gpt-3.5-turbo-0125'
- | 'gpt-3.5-turbo-16k-0613';
+ model: (string & {}) | ChatAPI.ChatModel;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their
diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts
index 78a7516ed..ef72bbbc9 100644
--- a/src/resources/chat/index.ts
+++ b/src/resources/chat/index.ts
@@ -1,6 +1,5 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-export { Chat } from './chat';
export {
ChatCompletion,
ChatCompletionAssistantMessageParam,
@@ -30,3 +29,4 @@ export {
CompletionCreateParamsStreaming,
Completions,
} from './completions';
+export { ChatModel, Chat } from './chat';
From 6f72e7ad3e4e151c9334f4449d1c3555255c2793 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Tue, 16 Apr 2024 12:54:23 -0400
Subject: [PATCH 067/533] release: 4.36.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 14 ++++++++++++++
README.md | 2 +-
build-deno | 2 +-
package.json | 2 +-
src/version.ts | 2 +-
6 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index c63d8fd43..c1ce2c41b 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.35.0"
+ ".": "4.36.0"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 48a52d258..3ddd03a8b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## 4.36.0 (2024-04-16)
+
+Full Changelog: [v4.35.0...v4.36.0](https://github.com/openai/openai-node/compare/v4.35.0...v4.36.0)
+
+### Features
+
+* **client:** add header OpenAI-Project ([#772](https://github.com/openai/openai-node/issues/772)) ([bb4df37](https://github.com/openai/openai-node/commit/bb4df3722082fb44b7d4feb7a47df796149150a2))
+* extract chat models to a named enum ([#775](https://github.com/openai/openai-node/issues/775)) ([141d2ed](https://github.com/openai/openai-node/commit/141d2ed308141dc751869353208e4d0632d3650c))
+
+
+### Build System
+
+* configure UTF-8 locale in devcontainer ([#774](https://github.com/openai/openai-node/issues/774)) ([bebf4f0](https://github.com/openai/openai-node/commit/bebf4f0ca1f884f8747caff0f0e065aafffde096))
+
## 4.35.0 (2024-04-15)
Full Changelog: [v4.34.0...v4.35.0](https://github.com/openai/openai-node/compare/v4.34.0...v4.35.0)
diff --git a/README.md b/README.md
index c32fcfcd9..406434e6d 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ You can import in Deno via:
```ts
-import OpenAI from 'https://deno.land/x/openai@v4.35.0/mod.ts';
+import OpenAI from 'https://deno.land/x/openai@v4.36.0/mod.ts';
```
diff --git a/build-deno b/build-deno
index ffdb2cb9d..6389062ec 100755
--- a/build-deno
+++ b/build-deno
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
Usage:
\`\`\`ts
-import OpenAI from "https://deno.land/x/openai@v4.35.0/mod.ts";
+import OpenAI from "https://deno.land/x/openai@v4.36.0/mod.ts";
const client = new OpenAI();
\`\`\`
diff --git a/package.json b/package.json
index d57fe15cd..e848ce857 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.35.0",
+ "version": "4.36.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index 7ca672a0d..460925cae 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.35.0'; // x-release-please-version
+export const VERSION = '4.36.0'; // x-release-please-version
From 0a1234dde22618ceb88954a8e480b6715b36f5b7 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Wed, 17 Apr 2024 12:34:49 -0400
Subject: [PATCH 068/533] feat(api): add vector stores (#776)
---
.stats.yml | 2 +-
README.md | 16 +-
api.md | 156 +++--
helpers.md | 23 +-
src/lib/AssistantStream.ts | 4 +-
src/lib/Util.ts | 23 +
.../beta/{assistants => }/assistants.ts | 298 +++++++--
src/resources/beta/assistants/files.ts | 154 -----
src/resources/beta/assistants/index.ts | 28 -
src/resources/beta/beta.ts | 13 +-
src/resources/beta/index.ts | 13 +-
src/resources/beta/threads/index.ts | 2 +-
.../beta/threads/{messages => }/messages.ts | 71 +-
src/resources/beta/threads/messages/files.ts | 105 ---
src/resources/beta/threads/messages/index.ts | 30 -
src/resources/beta/threads/runs/index.ts | 4 +-
src/resources/beta/threads/runs/runs.ts | 130 ++--
src/resources/beta/threads/runs/steps.ts | 98 +--
src/resources/beta/threads/threads.ts | 608 ++++++++++++++++--
.../beta/vector-stores/file-batches.ts | 292 +++++++++
src/resources/beta/vector-stores/files.ts | 277 ++++++++
src/resources/beta/vector-stores/index.ts | 25 +
.../beta/vector-stores/vector-stores.ts | 318 +++++++++
src/resources/fine-tuning/jobs/jobs.ts | 2 +-
.../beta/{assistants => }/assistants.test.ts | 11 +-
.../threads/{messages => }/messages.test.ts | 6 +-
.../beta/threads/messages/files.test.ts | 65 --
.../beta/threads/runs/runs.test.ts | 34 +-
.../beta/threads/threads.test.ts | 85 ++-
.../beta/vector-stores/file-batches.test.ts | 98 +++
.../files.test.ts | 22 +-
.../beta/vector-stores/vector-stores.test.ts | 97 +++
32 files changed, 2420 insertions(+), 690 deletions(-)
create mode 100644 src/lib/Util.ts
rename src/resources/beta/{assistants => }/assistants.ts (74%)
delete mode 100644 src/resources/beta/assistants/files.ts
delete mode 100644 src/resources/beta/assistants/index.ts
rename src/resources/beta/threads/{messages => }/messages.ts (89%)
delete mode 100644 src/resources/beta/threads/messages/files.ts
delete mode 100644 src/resources/beta/threads/messages/index.ts
create mode 100644 src/resources/beta/vector-stores/file-batches.ts
create mode 100644 src/resources/beta/vector-stores/files.ts
create mode 100644 src/resources/beta/vector-stores/index.ts
create mode 100644 src/resources/beta/vector-stores/vector-stores.ts
rename tests/api-resources/beta/{assistants => }/assistants.test.ts (93%)
rename tests/api-resources/beta/threads/{messages => }/messages.test.ts (93%)
delete mode 100644 tests/api-resources/beta/threads/messages/files.test.ts
create mode 100644 tests/api-resources/beta/vector-stores/file-batches.test.ts
rename tests/api-resources/beta/{assistants => vector-stores}/files.test.ts (77%)
create mode 100644 tests/api-resources/beta/vector-stores/vector-stores.test.ts
diff --git a/.stats.yml b/.stats.yml
index 47c2bce1c..2814bb777 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 55
+configured_endpoints: 62
diff --git a/README.md b/README.md
index 406434e6d..b75320e78 100644
--- a/README.md
+++ b/README.md
@@ -102,7 +102,7 @@ Documentation for each method, request param, and response field are available i
### Polling Helpers
-When interacting with the API some actions such as starting a Run may take time to complete. The SDK includes
+When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes
helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
If an API method results in an action which could benefit from polling there will be a corresponding version of the
method ending in 'AndPoll'.
@@ -117,6 +117,20 @@ const run = await openai.beta.threads.runs.createAndPoll(thread.id, {
More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
+### Bulk Upload Helpers
+
+When creating an interacting with vector stores, you can use the polling helpers to monitor the status of operations.
+For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once.
+
+```ts
+const fileList = [
+ createReadStream('/home/data/example.pdf'),
+ ...
+];
+
+const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, fileList);
+```
+
### Streaming Helpers
The SDK also includes helpers to process streams and handle the incoming events.
diff --git a/api.md b/api.md
index 7557ce133..8161fb2c7 100644
--- a/api.md
+++ b/api.md
@@ -179,53 +179,88 @@ Methods:
# Beta
-## Chat
+## VectorStores
-### Completions
+Types:
+
+- VectorStore
+- VectorStoreDeleted
Methods:
-- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
-- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
-- client.beta.chat.completions.stream(body, options?) -> ChatCompletionStream
+- client.beta.vectorStores.create({ ...params }) -> VectorStore
+- client.beta.vectorStores.retrieve(vectorStoreId) -> VectorStore
+- client.beta.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore
+- client.beta.vectorStores.list({ ...params }) -> VectorStoresPage
+- client.beta.vectorStores.del(vectorStoreId) -> VectorStoreDeleted
-## Assistants
+### Files
Types:
-- Assistant
-- AssistantDeleted
-- AssistantStreamEvent
-- AssistantTool
-- CodeInterpreterTool
-- FunctionTool
-- MessageStreamEvent
-- RetrievalTool
-- RunStepStreamEvent
-- RunStreamEvent
-- ThreadStreamEvent
+- VectorStoreFile
+- VectorStoreFileDeleted
Methods:
-- client.beta.assistants.create({ ...params }) -> Assistant
-- client.beta.assistants.retrieve(assistantId) -> Assistant
-- client.beta.assistants.update(assistantId, { ...params }) -> Assistant
-- client.beta.assistants.list({ ...params }) -> AssistantsPage
-- client.beta.assistants.del(assistantId) -> AssistantDeleted
+- client.beta.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile
+- client.beta.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile
+- client.beta.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage
+- client.beta.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted
+- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile>
+- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile>
+- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile>
+- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile>
-### Files
+### FileBatches
Types:
-- AssistantFile
-- FileDeleteResponse
+- VectorStoreFileBatch
Methods:
-- client.beta.assistants.files.create(assistantId, { ...params }) -> AssistantFile
-- client.beta.assistants.files.retrieve(assistantId, fileId) -> AssistantFile
-- client.beta.assistants.files.list(assistantId, { ...params }) -> AssistantFilesPage
-- client.beta.assistants.files.del(assistantId, fileId) -> FileDeleteResponse
+- client.beta.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch
+- client.beta.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch
+- client.beta.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch
+- client.beta.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage
+- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch>
+- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch>
+- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) -> Promise<VectorStoreFileBatch>
+
+## Chat
+
+### Completions
+
+Methods:
+
+- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
+- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
+- client.beta.chat.completions.stream(body, options?) -> ChatCompletionStream
+
+## Assistants
+
+Types:
+
+- Assistant
+- AssistantDeleted
+- AssistantStreamEvent
+- AssistantTool
+- CodeInterpreterTool
+- FileSearchTool
+- FunctionTool
+- MessageStreamEvent
+- RunStepStreamEvent
+- RunStreamEvent
+- ThreadStreamEvent
+
+Methods:
+
+- client.beta.assistants.create({ ...params }) -> Assistant
+- client.beta.assistants.retrieve(assistantId) -> Assistant
+- client.beta.assistants.update(assistantId, { ...params }) -> Assistant
+- client.beta.assistants.list({ ...params }) -> AssistantsPage
+- client.beta.assistants.del(assistantId) -> AssistantDeleted
## Threads
@@ -280,11 +315,11 @@ Types:
- CodeInterpreterOutputImage
- CodeInterpreterToolCall
- CodeInterpreterToolCallDelta
+- FileSearchToolCall
+- FileSearchToolCallDelta
- FunctionToolCall
- FunctionToolCallDelta
- MessageCreationStepDetails
-- RetrievalToolCall
-- RetrievalToolCallDelta
- RunStep
- RunStepDelta
- RunStepDeltaEvent
@@ -303,44 +338,33 @@ Methods:
Types:
-- Annotation
-- AnnotationDelta
-- FileCitationAnnotation
-- FileCitationDeltaAnnotation
-- FilePathAnnotation
-- FilePathDeltaAnnotation
-- ImageFile
-- ImageFileContentBlock
-- ImageFileDelta
-- ImageFileDeltaBlock
-- Message
-- MessageContent
-- MessageContentDelta
-- MessageDeleted
-- MessageDelta
-- MessageDeltaEvent
-- Text
-- TextContentBlock
-- TextDelta
-- TextDeltaBlock
-
-Methods:
-
-- client.beta.threads.messages.create(threadId, { ...params }) -> Message
-- client.beta.threads.messages.retrieve(threadId, messageId) -> Message
-- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message
-- client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage
-
-#### Files
-
-Types:
-
-- MessageFile
+- Annotation
+- AnnotationDelta
+- FileCitationAnnotation
+- FileCitationDeltaAnnotation
+- FilePathAnnotation
+- FilePathDeltaAnnotation
+- ImageFile
+- ImageFileContentBlock
+- ImageFileDelta
+- ImageFileDeltaBlock
+- Message
+- MessageContent
+- MessageContentDelta
+- MessageDeleted
+- MessageDelta
+- MessageDeltaEvent
+- Text
+- TextContentBlock
+- TextDelta
+- TextDeltaBlock
Methods:
-- client.beta.threads.messages.files.retrieve(threadId, messageId, fileId) -> MessageFile
-- client.beta.threads.messages.files.list(threadId, messageId, { ...params }) -> MessageFilesPage
+- client.beta.threads.messages.create(threadId, { ...params }) -> Message
+- client.beta.threads.messages.retrieve(threadId, messageId) -> Message
+- client.beta.threads.messages.update(threadId, messageId, { ...params }) -> Message
+- client.beta.threads.messages.list(threadId, { ...params }) -> MessagesPage
# Batches
diff --git a/helpers.md b/helpers.md
index 7a34c3023..dda1ab26b 100644
--- a/helpers.md
+++ b/helpers.md
@@ -1,4 +1,4 @@
-# Streaming Helpers
+# Helpers
OpenAI supports streaming responses when interacting with the [Chat](#chat-streaming) or [Assistant](#assistant-streaming-api) APIs.
@@ -449,3 +449,24 @@ See an example of a Next.JS integration here [`examples/stream-to-client-next.ts
#### Proxy Streaming to a Browser
See an example of using express to stream to a browser here [`examples/stream-to-client-express.ts`](examples/stream-to-client-express.ts).
+
+# Polling Helpers
+
+When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete.
+The SDK includes helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
+If an API method results in an action which could benefit from polling there will be a corresponding version of the
+method ending in `_AndPoll`.
+
+All methods also allow you to set the polling frequency, how often the API is checked for an update, via a function argument (`pollIntervalMs`).
+
+The polling methods are:
+
+```ts
+client.beta.threads.createAndRunPoll(...)
+client.beta.threads.runs.createAndPoll((...)
+client.beta.threads.runs.submitToolOutputsAndPoll((...)
+client.beta.vectorStores.files.uploadAndPoll((...)
+client.beta.vectorStores.files.createAndPoll((...)
+client.beta.vectorStores.fileBatches.createAndPoll((...)
+client.beta.vectorStores.fileBatches.uploadAndPoll((...)
+```
diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts
index ece0ec65c..a2974826c 100644
--- a/src/lib/AssistantStream.ts
+++ b/src/lib/AssistantStream.ts
@@ -7,7 +7,7 @@ import {
ImageFile,
TextDelta,
Messages,
-} from 'openai/resources/beta/threads/messages/messages';
+} from 'openai/resources/beta/threads/messages';
import * as Core from 'openai/core';
import { RequestOptions } from 'openai/core';
import {
@@ -30,7 +30,7 @@ import {
MessageStreamEvent,
RunStepStreamEvent,
RunStreamEvent,
-} from 'openai/resources/beta/assistants/assistants';
+} from 'openai/resources/beta/assistants';
import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps';
import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads';
import MessageDelta = Messages.MessageDelta;
diff --git a/src/lib/Util.ts b/src/lib/Util.ts
new file mode 100644
index 000000000..ae09b8a91
--- /dev/null
+++ b/src/lib/Util.ts
@@ -0,0 +1,23 @@
+/**
+ * Like `Promise.allSettled()` but throws an error if any promises are rejected.
+ */
+export const allSettledWithThrow = async (promises: Promise[]): Promise => {
+ const results = await Promise.allSettled(promises);
+ const rejected = results.filter((result): result is PromiseRejectedResult => result.status === 'rejected');
+ if (rejected.length) {
+ for (const result of rejected) {
+ console.error(result.reason);
+ }
+
+ throw new Error(`${rejected.length} promise(s) failed - see the above errors`);
+ }
+
+ // Note: TS was complaining about using `.filter().map()` here for some reason
+ const values: R[] = [];
+ for (const result of results) {
+ if (result.status === 'fulfilled') {
+ values.push(result.value);
+ }
+ }
+ return values;
+};
diff --git a/src/resources/beta/assistants/assistants.ts b/src/resources/beta/assistants.ts
similarity index 74%
rename from src/resources/beta/assistants/assistants.ts
rename to src/resources/beta/assistants.ts
index fc9afe2ae..c0827848e 100644
--- a/src/resources/beta/assistants/assistants.ts
+++ b/src/resources/beta/assistants.ts
@@ -3,18 +3,15 @@
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
-import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
+import * as AssistantsAPI from 'openai/resources/beta/assistants';
import * as Shared from 'openai/resources/shared';
-import * as FilesAPI from 'openai/resources/beta/assistants/files';
+import * as MessagesAPI from 'openai/resources/beta/threads/messages';
import * as ThreadsAPI from 'openai/resources/beta/threads/threads';
-import * as MessagesAPI from 'openai/resources/beta/threads/messages/messages';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
import * as StepsAPI from 'openai/resources/beta/threads/runs/steps';
import { CursorPage, type CursorPageParams } from 'openai/pagination';
export class Assistants extends APIResource {
- files: FilesAPI.Files = new FilesAPI.Files(this._client);
-
/**
* Create an assistant with a model and instructions.
*/
@@ -22,7 +19,7 @@ export class Assistants extends APIResource {
return this._client.post('/assistants', {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -32,7 +29,7 @@ export class Assistants extends APIResource {
retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.get(`/assistants/${assistantId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -47,7 +44,7 @@ export class Assistants extends APIResource {
return this._client.post(`/assistants/${assistantId}`, {
body,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -69,7 +66,7 @@ export class Assistants extends APIResource {
return this._client.getAPIList('/assistants', AssistantsPage, {
query,
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
@@ -79,7 +76,7 @@ export class Assistants extends APIResource {
del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise {
return this._client.delete(`/assistants/${assistantId}`, {
...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
+ headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
}
}
@@ -105,13 +102,6 @@ export interface Assistant {
*/
description: string | null;
- /**
- * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs
- * attached to this assistant. There can be a maximum of 20 files attached to the
- * assistant. Files are ordered by their creation date in ascending order.
- */
- file_ids: Array;
-
/**
* The system instructions that the assistant uses. The maximum length is 256,000
* characters.
@@ -147,9 +137,53 @@ export interface Assistant {
/**
* A list of tool enabled on the assistant. There can be a maximum of 128 tools per
- * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ * assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ * `function`.
*/
tools: Array;
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: Assistant.ToolResources | null;
+}
+
+export namespace Assistant {
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter`` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array;
+ }
+
+ export interface FileSearch {
+ /**
+ * The ID of the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array;
+ }
+ }
}
export interface AssistantDeleted {
@@ -535,7 +569,7 @@ export namespace AssistantStreamEvent {
}
}
-export type AssistantTool = CodeInterpreterTool | RetrievalTool | FunctionTool;
+export type AssistantTool = CodeInterpreterTool | FileSearchTool | FunctionTool;
export interface CodeInterpreterTool {
/**
@@ -544,6 +578,13 @@ export interface CodeInterpreterTool {
type: 'code_interpreter';
}
+export interface FileSearchTool {
+ /**
+ * The type of tool being defined: `file_search`
+ */
+ type: 'file_search';
+}
+
export interface FunctionTool {
function: Shared.FunctionDefinition;
@@ -642,13 +683,6 @@ export namespace MessageStreamEvent {
}
}
-export interface RetrievalTool {
- /**
- * The type of tool being defined: `retrieval`
- */
- type: 'retrieval';
-}
-
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/runs/step-object) is
@@ -956,13 +990,6 @@ export interface AssistantCreateParams {
*/
description?: string | null;
- /**
- * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs
- * attached to this assistant. There can be a maximum of 20 files attached to the
- * assistant. Files are ordered by their creation date in ascending order.
- */
- file_ids?: Array;
-
/**
* The system instructions that the assistant uses. The maximum length is 256,000
* characters.
@@ -982,27 +1009,123 @@ export interface AssistantCreateParams {
*/
name?: string | null;
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: AssistantCreateParams.ToolResources | null;
+
/**
* A list of tools enabled on the assistant. There can be a maximum of 128 tools per
- * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ * assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ * `function`.
*/
tools?: Array<AssistantTool>;
+
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ *
+ * We generally recommend altering this or temperature but not both.
+ */
+ top_p?: number | null;
}
-export interface AssistantUpdateParams {
+export namespace AssistantCreateParams {
/**
- * The description of the assistant. The maximum length is 512 characters.
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
*/
- description?: string | null;
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
+ * available to the `code_interpreter` tool. There can be a maximum of 20 files
+ * associated with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * The
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array<string>;
+
+ /**
+ * A helper to create a
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * with file_ids and attach it to this assistant. There can be a maximum of 1
+ * vector store attached to the assistant.
+ */
+ vector_stores?: Array<FileSearch.VectorStore>;
+ }
+ export namespace FileSearch {
+ export interface VectorStore {
+ /**
+ * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
+ * add to the vector store. There can be a maximum of 10000 files in a vector
+ * store.
+ */
+ file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to a vector store. This can be
+ * useful for storing additional information about the vector store in a structured
+ * format. Keys can be a maximum of 64 characters long and values can be a maximum
+ * of 512 characters long.
+ */
+ metadata?: unknown;
+ }
+ }
+ }
+}
+
+export interface AssistantUpdateParams {
/**
- * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs
- * attached to this assistant. There can be a maximum of 20 files attached to the
- * assistant. Files are ordered by their creation date in ascending order. If a
- * file was previously attached to the list but does not show up in the list, it
- * will be deleted from the assistant.
+ * The description of the assistant. The maximum length is 512 characters.
*/
- file_ids?: Array<string>;
+ description?: string | null;
/**
* The system instructions that the assistant uses. The maximum length is 256,000
@@ -1032,11 +1155,90 @@ export interface AssistantUpdateParams {
*/
name?: string | null;
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+ * message the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to
+ * produce JSON yourself via a system or user message. Without this, the model may
+ * generate an unending stream of whitespace until the generation reaches the token
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
+ */
+ response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ tool_resources?: AssistantUpdateParams.ToolResources | null;
+
/**
* A list of tools enabled on the assistant. There can be a maximum of 128 tools per
- * assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
+ * assistant. Tools can be of types `code_interpreter`, `file_search`, or
+ * `function`.
*/
tools?: Array<AssistantTool>;
+
+ /**
+ * An alternative to sampling with temperature, called nucleus sampling, where the
+ * model considers the results of the tokens with top_p probability mass. So 0.1
+ * means only the tokens comprising the top 10% probability mass are considered.
+ *
+ * We generally recommend altering this or temperature but not both.
+ */
+ top_p?: number | null;
+}
+
+export namespace AssistantUpdateParams {
+ /**
+ * A set of resources that are used by the assistant's tools. The resources are
+ * specific to the type of tool. For example, the `code_interpreter` tool requires
+ * a list of file IDs, while the `file_search` tool requires a list of vector store
+ * IDs.
+ */
+ export interface ToolResources {
+ code_interpreter?: ToolResources.CodeInterpreter;
+
+ file_search?: ToolResources.FileSearch;
+ }
+
+ export namespace ToolResources {
+ export interface CodeInterpreter {
+ /**
+ * Overrides the list of
+ * [file](https://platform.openai.com/docs/api-reference/files) IDs made available
+ * to the `code_interpreter` tool. There can be a maximum of 20 files associated
+ * with the tool.
+ */
+ file_ids?: Array<string>;
+ }
+
+ export interface FileSearch {
+ /**
+ * Overrides the
+ * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
+ * attached to this assistant. There can be a maximum of 1 vector store attached to
+ * the assistant.
+ */
+ vector_store_ids?: Array<string>;
+ }
+ }
}
export interface AssistantListParams extends CursorPageParams {
@@ -1061,9 +1263,9 @@ export namespace Assistants {
export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent;
export import AssistantTool = AssistantsAPI.AssistantTool;
export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool;
+ export import FileSearchTool = AssistantsAPI.FileSearchTool;
export import FunctionTool = AssistantsAPI.FunctionTool;
export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent;
- export import RetrievalTool = AssistantsAPI.RetrievalTool;
export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent;
export import RunStreamEvent = AssistantsAPI.RunStreamEvent;
export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent;
@@ -1071,10 +1273,4 @@ export namespace Assistants {
export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams;
export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams;
export import AssistantListParams = AssistantsAPI.AssistantListParams;
- export import Files = FilesAPI.Files;
- export import AssistantFile = FilesAPI.AssistantFile;
- export import FileDeleteResponse = FilesAPI.FileDeleteResponse;
- export import AssistantFilesPage = FilesAPI.AssistantFilesPage;
- export import FileCreateParams = FilesAPI.FileCreateParams;
- export import FileListParams = FilesAPI.FileListParams;
}
diff --git a/src/resources/beta/assistants/files.ts b/src/resources/beta/assistants/files.ts
deleted file mode 100644
index 51fd0c0d8..000000000
--- a/src/resources/beta/assistants/files.ts
+++ /dev/null
@@ -1,154 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import * as Core from 'openai/core';
-import { APIResource } from 'openai/resource';
-import { isRequestOptions } from 'openai/core';
-import * as FilesAPI from 'openai/resources/beta/assistants/files';
-import { CursorPage, type CursorPageParams } from 'openai/pagination';
-
-export class Files extends APIResource {
- /**
- * Create an assistant file by attaching a
- * [File](https://platform.openai.com/docs/api-reference/files) to an
- * [assistant](https://platform.openai.com/docs/api-reference/assistants).
- */
- create(
- assistantId: string,
- body: FileCreateParams,
- options?: Core.RequestOptions,
- ): Core.APIPromise<AssistantFile> {
- return this._client.post(`/assistants/${assistantId}/files`, {
- body,
- ...options,
- headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
- });
- }
-
- /**
- * Retrieves an AssistantFile.
- */
- retrieve(
- assistantId: string,
- fileId: string,
- options?: Core.RequestOptions,
- ): Core.APIPromise