diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index 8ea34be96..000000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM debian:bookworm-slim AS stainless - -RUN apt-get update && apt-get install -y \ - nodejs \ - npm \ - yarnpkg \ - && apt-get clean autoclean - -# Ensure UTF-8 encoding -ENV LANG=C.UTF-8 -ENV LC_ALL=C.UTF-8 - -# Yarn -RUN ln -sf /usr/bin/yarnpkg /usr/bin/yarn - -WORKDIR /workspace - -COPY package.json yarn.lock /workspace/ - -RUN yarn install - -COPY . /workspace diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d55fc4d67..763462fad 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,20 +1,17 @@ // For format details, see https://aka.ms/devcontainer.json. For config options, see the // README at: https://github.com/devcontainers/templates/tree/main/src/debian { - "name": "Debian", - "build": { - "dockerfile": "Dockerfile" + "name": "Development", + "image": "mcr.microsoft.com/devcontainers/typescript-node:latest", + "features": { + "ghcr.io/devcontainers/features/node:1": {} + }, + "postCreateCommand": "yarn install", + "customizations": { + "vscode": { + "extensions": [ + "esbenp.prettier-vscode" + ] + } } - - // Features to add to the dev container. More info: https://containers.dev/features. - // "features": {}, - - // Use 'forwardPorts' to make a list of ports inside the container available locally. - // "forwardPorts": [], - - // Configure tool-specific properties. - // "customizations": {}, - - // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. - // "remoteUser": "root" } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6798e38a..09f1636b6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,19 +1,18 @@ name: CI on: push: - branches: - - master - pull_request: - branches: - - master - - next + branches-ignore: + - 'generated' + - 'codegen/**' + - 'integrated/**' + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: + timeout-minutes: 10 name: lint - runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' - + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -29,10 +28,12 @@ jobs: run: ./scripts/lint build: + timeout-minutes: 5 name: build - runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' - + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + permissions: + contents: read + id-token: write steps: - uses: actions/checkout@v4 @@ -46,21 +47,89 @@ jobs: - name: Check build run: ./scripts/build + + - name: Get GitHub OIDC Token + if: github.repository == 'stainless-sdks/openai-node' + id: github-oidc + uses: actions/github-script@v6 + with: + script: core.setOutput('github_token', await core.getIDToken()); + + - name: Upload tarball + if: github.repository == 'stainless-sdks/openai-node' + env: + URL: https://pkg.stainless.com/s + AUTH: ${{ steps.github-oidc.outputs.github_token }} + SHA: ${{ github.sha }} + run: ./scripts/utils/upload-artifact.sh test: + timeout-minutes: 10 name: test - runs-on: ubuntu-latest - if: github.repository == 'openai/openai-node' - + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 - name: Set up Node 
uses: actions/setup-node@v4 with: - node-version: '18' + node-version: '20' - name: Bootstrap run: ./scripts/bootstrap - name: Run tests run: ./scripts/test + + examples: + timeout-minutes: 10 + name: examples + runs-on: ${{ github.repository == 'stainless-sdks/openai-node' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.repository == 'openai/openai-node' + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '20' + - name: Install dependencies + run: | + yarn install + + - env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + run: | + yarn tsn examples/demo.ts + + ecosystem_tests: + name: ecosystem tests (v${{ matrix.node-version }}) + runs-on: ubuntu-latest + timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + node-version: ['20'] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '${{ matrix.node-version }}' + + - uses: denoland/setup-deno@v1 + with: + deno-version: v1.39.0 + + - uses: oven-sh/setup-bun@v2 + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run ecosystem tests + run: | + yarn tsn ecosystem-tests/cli.ts --live --verbose --parallel --jobs=4 --retry=3 + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml index d5ae1f755..19b7dd831 100644 --- a/.github/workflows/create-releases.yml +++ b/.github/workflows/create-releases.yml @@ -12,6 +12,9 @@ jobs: if: github.ref == 'refs/heads/master' && github.repository == 'openai/openai-node' runs-on: ubuntu-latest environment: publish + permissions: + contents: read + id-token: write steps: - uses: actions/checkout@v4 @@ -22,27 +25,12 @@ jobs: repo: ${{ github.event.repository.full_name }} stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - name: Generate a token - id: generate_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - owner: 'openai' - repositories: 'openai-node,openai-deno-build' - - name: Set up Node if: ${{ steps.release.outputs.releases_created }} uses: actions/setup-node@v3 with: node-version: '18' - - name: Set up Deno - if: ${{ steps.release.outputs.releases_created }} - uses: denoland/setup-deno@v1 - with: - deno-version: v1.35.1 - - name: Install dependencies if: ${{ steps.release.outputs.releases_created }} run: | @@ -55,11 +43,8 @@ jobs: env: NPM_TOKEN: ${{ secrets.OPENAI_NPM_TOKEN || secrets.NPM_TOKEN }} - - name: Publish to Deno + - name: Publish to JSR if: ${{ steps.release.outputs.releases_created }} run: | - bash ./scripts/git-publish-deno.sh - env: - DENO_PUSH_REMOTE_URL: https://username:${{ steps.generate_token.outputs.token }}@github.com/openai/openai-deno-build.git - DENO_PUSH_BRANCH: main + bash ./bin/publish-jsr diff --git a/.github/workflows/publish-deno.yml b/.github/workflows/publish-deno.yml deleted file mode 100644 index 894c516a0..000000000 --- a/.github/workflows/publish-deno.yml +++ /dev/null @@ -1,44 +0,0 @@ -# workflow for re-running publishing to Deno in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-deno.yml -name: Publish Deno -on: - workflow_dispatch: - -jobs: - publish: - name: publish - runs-on: ubuntu-latest - environment: publish - - steps: - - uses: actions/checkout@v4 - - - name: Generate a token - id: generate_token - uses: 
actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.APP_PRIVATE_KEY }} - owner: 'openai' - repositories: 'openai-node,openai-deno-build' - - - name: Set up Node - uses: actions/setup-node@v3 - with: - node-version: '18' - - - name: Set up Deno - uses: denoland/setup-deno@v1 - with: - deno-version: v1.35.1 - - - name: Install dependencies - run: | - yarn install - - - name: Publish to Deno - run: | - bash ./scripts/git-publish-deno.sh - env: - DENO_PUSH_REMOTE_URL: https://username:${{ steps.generate_token.outputs.token }}@github.com/openai/openai-deno-build.git - DENO_PUSH_BRANCH: main diff --git a/.github/workflows/publish-jsr.yml b/.github/workflows/publish-jsr.yml new file mode 100644 index 000000000..e74673c1f --- /dev/null +++ b/.github/workflows/publish-jsr.yml @@ -0,0 +1,30 @@ +# workflow for re-running publishing to JSR in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/openai/openai-node/actions/workflows/publish-jsr.yml +name: Publish JSR +on: + workflow_dispatch: + +jobs: + publish: + name: publish + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + environment: publish + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node + uses: actions/setup-node@v3 + with: + node-version: '20' + + - name: Install dependencies + run: | + yarn install + + - name: Publish to JSR + run: | + bash ./bin/publish-jsr diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 5a3711b53..0662a79c5 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v3 with: - node-version: '18' + node-version: '20' - name: Install dependencies run: | diff --git a/.gitignore b/.gitignore index 0af7568e5..3fdab1cb7 100644 --- a/.gitignore +++ b/.gitignore @@ -4,11 +4,11 @@ yarn-error.log codegen.log Brewfile.lock.json dist -/deno +dist-deno /*.tgz .idea/ tmp .pack ecosystem-tests/deno/package.json ecosystem-tests/*/openai.tgz - +.dev.vars diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e8c54ecee..989bed91e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "4.67.3" + ".": "4.100.0" } diff --git a/.stats.yml b/.stats.yml index 68789976b..afa33d93b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,4 @@ -configured_endpoints: 68 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-71e58a77027c67e003fdd1b1ac8ac11557d8bfabc7666d1a827c6b1ca8ab98b5.yml +configured_endpoints: 101 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-262e171d0a8150ea1192474d16ba3afdf9a054b399f1a49a9c9b697a3073c136.yml +openapi_spec_hash: 33e00a48df8f94c94f46290c489f132b +config_hash: d8d5fda350f6db77c784f35429741a2e diff --git a/CHANGELOG.md b/CHANGELOG.md index 710d09ca9..adda41e52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,869 @@ # Changelog +## 4.100.0 (2025-05-16) + +Full Changelog: [v4.99.0...v4.100.0](https://github.com/openai/openai-node/compare/v4.99.0...v4.100.0) + +### Features + +* **api:** further updates for evals API ([3f6f248](https://github.com/openai/openai-node/commit/3f6f248191b45015924be76fd5154d149c4ed8a0)) + + +### Chores + +* **internal:** version bump ([5123fe0](https://github.com/openai/openai-node/commit/5123fe08a56f3d0040b1cc67129382f3eacc3cca)) + +## 4.99.0 (2025-05-16) + +Full 
Changelog: [v4.98.0...v4.99.0](https://github.com/openai/openai-node/compare/v4.98.0...v4.99.0) + +### Features + +* **api:** manual updates ([75eb804](https://github.com/openai/openai-node/commit/75eb804edd6ad653eaa22d47f8c6d09ee845ebf4)) +* **api:** responses x eval api ([5029f1a](https://github.com/openai/openai-node/commit/5029f1a05eb1e8601ada06e0a5ba49f4c2b83c02)) +* **api:** Updating Assistants and Evals API schemas ([27fd517](https://github.com/openai/openai-node/commit/27fd5173b20f75debe96024ae8f1ce58a8254d26)) + +## 4.98.0 (2025-05-08) + +Full Changelog: [v4.97.0...v4.98.0](https://github.com/openai/openai-node/compare/v4.97.0...v4.98.0) + +### Features + +* **api:** Add reinforcement fine-tuning api support ([4aa7a79](https://github.com/openai/openai-node/commit/4aa7a7954c63caa26cc1640ace56093fe1cafa04)) + + +### Chores + +* **ci:** bump node version for release workflows ([2961f63](https://github.com/openai/openai-node/commit/2961f63c4d5b8ae8efdf8ea6581aa83c6b0f722e)) +* **internal:** fix formatting ([91a44fe](https://github.com/openai/openai-node/commit/91a44fe11c0847dc50d48a03a8d409ac4bece37a)) + + +### Documentation + +* add examples to tsdocs ([7d841b7](https://github.com/openai/openai-node/commit/7d841b7f98eb542a398fb9de12056125e8d6cb22)) + +## 4.97.0 (2025-05-02) + +Full Changelog: [v4.96.2...v4.97.0](https://github.com/openai/openai-node/compare/v4.96.2...v4.97.0) + +### Features + +* **api:** add image sizes, reasoning encryption ([9c2113a](https://github.com/openai/openai-node/commit/9c2113af7c7ea9a797a0e39d07d9ad8627c96acb)) + + +### Chores + +* **docs:** add missing deprecation warnings ([253392c](https://github.com/openai/openai-node/commit/253392c93adca88e0ee83f784183b2128ff64a16)) + + +### Documentation + +* fix "procesing" -> "processing" in realtime examples ([#1406](https://github.com/openai/openai-node/issues/1406)) ([8717b9f](https://github.com/openai/openai-node/commit/8717b9fce87d03e51d40ee58f5d6259408405e1f)) +* **readme:** fix typo ([cab3478](https://github.com/openai/openai-node/commit/cab3478f195f9de5c21033a1b3684f52ad347ffc)) + +## 4.96.2 (2025-04-29) + +Full Changelog: [v4.96.1...v4.96.2](https://github.com/openai/openai-node/compare/v4.96.1...v4.96.2) + +### Bug Fixes + +* **types:** export ParseableToolsParams ([#1486](https://github.com/openai/openai-node/issues/1486)) ([3e7c92c](https://github.com/openai/openai-node/commit/3e7c92c8a76c1f747610d63d9d69a88b796ee9fc)) + + +### Chores + +* **ci:** only use depot for staging repos ([214da39](https://github.com/openai/openai-node/commit/214da398c76f46d40994665f3ca7e10e203e9579)) +* **ci:** run on more branches and use depot runners ([ead76fc](https://github.com/openai/openai-node/commit/ead76fc6429ac52a1c8b008ac5c0afcefaa0bae5)) + +## 4.96.1 (2025-04-29) + +Full Changelog: [v4.96.0...v4.96.1](https://github.com/openai/openai-node/compare/v4.96.0...v4.96.1) + +### Bug Fixes + +* **types:** export ParseableToolsParams ([#1486](https://github.com/openai/openai-node/issues/1486)) ([eb055b2](https://github.com/openai/openai-node/commit/eb055b26ce90e5fe1b101a95a4390956d519e168)) + + +### Chores + +* **ci:** only use depot for staging repos ([e80af47](https://github.com/openai/openai-node/commit/e80af47590056baa8f456e8d60c37f0d00ff08c4)) +* **ci:** run on more branches and use depot runners ([b04a801](https://github.com/openai/openai-node/commit/b04a801d0356105eacddbb4d10f4359719585dd6)) + +## 4.96.0 (2025-04-23) + +Full Changelog: 
[v4.95.1...v4.96.0](https://github.com/openai/openai-node/compare/v4.95.1...v4.96.0) + +### Features + +* **api:** adding new image model support ([a00d331](https://github.com/openai/openai-node/commit/a00d33190edd08df7d9c088c00ab7b77673f88ba)) + + +### Bug Fixes + +* **types:** export AssistantStream ([#1472](https://github.com/openai/openai-node/issues/1472)) ([626c844](https://github.com/openai/openai-node/commit/626c844a758a68ffbff48873d4773be2e3868952)) + + +### Chores + +* **ci:** add timeout thresholds for CI jobs ([e465063](https://github.com/openai/openai-node/commit/e46506351097f1de39c866c28b6ec20fa724fc36)) + +## 4.95.1 (2025-04-18) + +Full Changelog: [v4.95.0...v4.95.1](https://github.com/openai/openai-node/compare/v4.95.0...v4.95.1) + +### Bug Fixes + +* **zod:** warn on optional field usage ([#1469](https://github.com/openai/openai-node/issues/1469)) ([aea2d12](https://github.com/openai/openai-node/commit/aea2d123d200e6a7eae11e66583127270a8db8bf)) + +## 4.95.0 (2025-04-16) + +Full Changelog: [v4.94.0...v4.95.0](https://github.com/openai/openai-node/compare/v4.94.0...v4.95.0) + +### Features + +* **api:** add o3 and o4-mini model IDs ([4845cd9](https://github.com/openai/openai-node/commit/4845cd9ac17450022f1632ae01397e41a97f1662)) + +## 4.94.0 (2025-04-14) + +Full Changelog: [v4.93.0...v4.94.0](https://github.com/openai/openai-node/compare/v4.93.0...v4.94.0) + +### Features + +* **api:** adding gpt-4.1 family of model IDs ([bddcbcf](https://github.com/openai/openai-node/commit/bddcbcffdc409ffc8a078a65bbd302cd50b35ff0)) +* **api:** manual updates ([7532f48](https://github.com/openai/openai-node/commit/7532f48ad25c5125064a59985587c20c47a2cbfb)) + + +### Chores + +* **client:** minor internal fixes ([d342f17](https://github.com/openai/openai-node/commit/d342f17e2642da5ee83d080b410dc3c4fe153814)) +* **internal:** reduce CI branch coverage ([a49b94a](https://github.com/openai/openai-node/commit/a49b94a9aebd3e30e1802fff633e1b46cfb81942)) +* **internal:** upload builds and expand CI branch coverage ([#1460](https://github.com/openai/openai-node/issues/1460)) ([7e23bb4](https://github.com/openai/openai-node/commit/7e23bb4f4a09303195b612cc5b393cc41c1d855b)) +* workaround build errors ([913eba8](https://github.com/openai/openai-node/commit/913eba828d116f49fa78b219c62274c1e95c6f17)) + +## 4.93.0 (2025-04-08) + +Full Changelog: [v4.92.1...v4.93.0](https://github.com/openai/openai-node/compare/v4.92.1...v4.93.0) + +### Features + +* **api:** Add evalapi to sdk ([#1456](https://github.com/openai/openai-node/issues/1456)) ([ee917e3](https://github.com/openai/openai-node/commit/ee917e3335fcf44e87a28e54ce8ddfdcdfab1652)) + + +### Chores + +* **internal:** fix examples ([#1457](https://github.com/openai/openai-node/issues/1457)) ([a3dd0dd](https://github.com/openai/openai-node/commit/a3dd0dde3e8ad9cc7a02cf203d4550f91d31a2ae)) +* **internal:** skip broken test ([#1458](https://github.com/openai/openai-node/issues/1458)) ([4d2f815](https://github.com/openai/openai-node/commit/4d2f815ba5f6c426f9c21f4c3db443166389bbf8)) +* **tests:** improve enum examples ([#1454](https://github.com/openai/openai-node/issues/1454)) ([ecabce2](https://github.com/openai/openai-node/commit/ecabce282a9fb60122310942f3b647dfefae5403)) + +## 4.92.1 (2025-04-07) + +Full Changelog: [v4.92.0...v4.92.1](https://github.com/openai/openai-node/compare/v4.92.0...v4.92.1) + +### Chores + +* **internal:** only run examples workflow in main repo ([#1450](https://github.com/openai/openai-node/issues/1450)) 
([5e49a7a](https://github.com/openai/openai-node/commit/5e49a7a447bb788fa05898c15ae57c6ea9c8fd49)) + +## 4.92.0 (2025-04-07) + +Full Changelog: [v4.91.1...v4.92.0](https://github.com/openai/openai-node/compare/v4.91.1...v4.92.0) + +### Features + +* **api:** manual updates ([891754d](https://github.com/openai/openai-node/commit/891754d7fa42d71ce4f93288dd043ef0b97fee23)) +* **api:** manual updates ([01e5546](https://github.com/openai/openai-node/commit/01e5546f3f48a1f4d645e09e7581f16b30f25bdd)) +* **api:** manual updates ([f38dbf3](https://github.com/openai/openai-node/commit/f38dbf3b39b0800b3bbef5c603a4fa2b616f25d8)) +* **api:** manual updates ([1f12253](https://github.com/openai/openai-node/commit/1f12253054a5a7e35dc03b17901b4c1f33bf5b3d)) + + +### Bug Fixes + +* **api:** improve type resolution when importing as a package ([#1444](https://github.com/openai/openai-node/issues/1444)) ([4aa46d6](https://github.com/openai/openai-node/commit/4aa46d6c0da681bcdde31fcbb09e8ba6fdaf764b)) +* **client:** send `X-Stainless-Timeout` in seconds ([#1442](https://github.com/openai/openai-node/issues/1442)) ([aa4206c](https://github.com/openai/openai-node/commit/aa4206c7d93b4e3114a697f5467ffbbf5a64d1a8)) +* **embeddings:** correctly decode base64 data ([#1448](https://github.com/openai/openai-node/issues/1448)) ([58128f7](https://github.com/openai/openai-node/commit/58128f7efde73726da740c42adde7b02cdf60a6a)) +* **mcp:** remove unused tools.ts ([#1445](https://github.com/openai/openai-node/issues/1445)) ([520a8fa](https://github.com/openai/openai-node/commit/520a8fa77a69ce5855dde3481f9bd39339cb7b83)) + + +### Chores + +* **internal:** add aliases for Record and Array ([#1443](https://github.com/openai/openai-node/issues/1443)) ([b65391b](https://github.com/openai/openai-node/commit/b65391ba10d5063035c3e5c0bcc5a48ffc80f41d)) + +## 4.91.1 (2025-04-01) + +Full Changelog: [v4.91.0...v4.91.1](https://github.com/openai/openai-node/compare/v4.91.0...v4.91.1) + +### Bug Fixes + +* **docs:** correct docstring on responses.stream ([1c8cd6a](https://github.com/openai/openai-node/commit/1c8cd6a638128b0ff5fac89d6c7db256f0b63a85)) + + +### Chores + +* Remove deprecated/unused remote spec feature ([ce3dfa8](https://github.com/openai/openai-node/commit/ce3dfa88bd4d395debccc0e6e1aac6d218b07cb8)) + +## 4.91.0 (2025-03-31) + +Full Changelog: [v4.90.0...v4.91.0](https://github.com/openai/openai-node/compare/v4.90.0...v4.91.0) + +### Features + +* **api:** add `get /responses/{response_id}/input_items` endpoint ([ef0e0ac](https://github.com/openai/openai-node/commit/ef0e0acd469379ae6f2745c83e6c6813ff7b4edc)) + + +### Performance Improvements + +* **embedding:** default embedding creation to base64 ([#1312](https://github.com/openai/openai-node/issues/1312)) ([e54530e](https://github.com/openai/openai-node/commit/e54530e4f6f00d7d74fc8636bbdb6f6280548750)), closes [#1310](https://github.com/openai/openai-node/issues/1310) + +## 4.90.0 (2025-03-27) + +Full Changelog: [v4.89.1...v4.90.0](https://github.com/openai/openai-node/compare/v4.89.1...v4.90.0) + +### Features + +* **api:** add `get /chat/completions` endpoint ([2d6710a](https://github.com/openai/openai-node/commit/2d6710a1f9dd4f768d9c73e9c9f5f93c737cdc66)) + + +### Bug Fixes + +* **audio:** correctly handle transcription streaming ([2a9b603](https://github.com/openai/openai-node/commit/2a9b60336cd40a4d4fb9b898ece49170ad648fd0)) +* **internal:** work around https://github.com/vercel/next.js/issues/76881 ([#1427](https://github.com/openai/openai-node/issues/1427)) 
([b467e94](https://github.com/openai/openai-node/commit/b467e949476621e8e92587a83c9de6fab35b2b9d)) + + +### Chores + +* add hash of OpenAPI spec/config inputs to .stats.yml ([45db35e](https://github.com/openai/openai-node/commit/45db35e34be560c75bf36224cc153c6d0e6e2a88)) +* **api:** updates to supported Voice IDs ([#1424](https://github.com/openai/openai-node/issues/1424)) ([404f4db](https://github.com/openai/openai-node/commit/404f4db41a2ee651f5bfdaa7b8881e1bf015f058)) +* **client:** expose headers on some streaming errors ([#1423](https://github.com/openai/openai-node/issues/1423)) ([b0783cc](https://github.com/openai/openai-node/commit/b0783cc6221b68f1738e759b393756a7d0e540a3)) + +## 4.89.1 (2025-03-26) + +Full Changelog: [v4.89.0...v4.89.1](https://github.com/openai/openai-node/compare/v4.89.0...v4.89.1) + +### Bug Fixes + +* avoid type error in certain environments ([#1413](https://github.com/openai/openai-node/issues/1413)) ([d3f6f8f](https://github.com/openai/openai-node/commit/d3f6f8f9c7511a98cc5795756fee49a30e44d485)) +* **client:** remove duplicate types ([#1410](https://github.com/openai/openai-node/issues/1410)) ([338878b](https://github.com/openai/openai-node/commit/338878bf484dac5a4fadf50592b1f8d1045cd4b6)) +* **exports:** add missing type exports ([#1417](https://github.com/openai/openai-node/issues/1417)) ([2d15ada](https://github.com/openai/openai-node/commit/2d15ada0e0d81a4e0d097dddbe99be2222c4c0ef)) + + +### Chores + +* **internal:** version bump ([#1408](https://github.com/openai/openai-node/issues/1408)) ([9c0949a](https://github.com/openai/openai-node/commit/9c0949a93c3e181d327f820dbc2a4b0ad77258e9)) + +## 4.89.0 (2025-03-20) + +Full Changelog: [v4.88.0...v4.89.0](https://github.com/openai/openai-node/compare/v4.88.0...v4.89.0) + +### Features + +* add audio helpers ([ea1b6b4](https://github.com/openai/openai-node/commit/ea1b6b4ef38813af568b3662037519da9404b80e)) +* **api:** new models for TTS, STT, + new audio features for Realtime ([#1407](https://github.com/openai/openai-node/issues/1407)) ([142933a](https://github.com/openai/openai-node/commit/142933ae70d06045dbf4661cd72c7fa35ae7903d)) + + +### Chores + +* **internal:** version bump ([#1400](https://github.com/openai/openai-node/issues/1400)) ([6838ab4](https://github.com/openai/openai-node/commit/6838ab4268c7c0e083e7be21ef1a51bdea0f0b57)) + +## 4.88.0 (2025-03-19) + +Full Changelog: [v4.87.4...v4.88.0](https://github.com/openai/openai-node/compare/v4.87.4...v4.88.0) + +### Features + +* **api:** o1-pro now available through the API ([#1398](https://github.com/openai/openai-node/issues/1398)) ([616a7e9](https://github.com/openai/openai-node/commit/616a7e90e764882cd749a65af8cc6ae8734fc80d)) + + +### Chores + +* **exports:** cleaner resource index imports ([#1396](https://github.com/openai/openai-node/issues/1396)) ([26b0856](https://github.com/openai/openai-node/commit/26b0856cd63846c34b75895a1ea42ceec7908c1a)) +* **exports:** stop using path fallbacks ([#1397](https://github.com/openai/openai-node/issues/1397)) ([d1479c2](https://github.com/openai/openai-node/commit/d1479c23aff68dd46c73fd31896dd2298a6bf140)) +* **internal:** version bump ([#1393](https://github.com/openai/openai-node/issues/1393)) ([7f16c3a](https://github.com/openai/openai-node/commit/7f16c3aa7b1ab36541219c5a0f93fc518733d0e3)) + +## 4.87.4 (2025-03-18) + +Full Changelog: [v4.87.3...v4.87.4](https://github.com/openai/openai-node/compare/v4.87.3...v4.87.4) + +### Bug Fixes + +* **api:** correct some Responses types 
([#1391](https://github.com/openai/openai-node/issues/1391)) ([af45876](https://github.com/openai/openai-node/commit/af458766ac721fb6cf18e7d78c458506c8bfc4e1)) +* **types:** ignore missing `id` in responses pagination ([1b9d20e](https://github.com/openai/openai-node/commit/1b9d20e71f5afbd4999f1999fe4810175476c5d2)) +* **types:** improve responses type names ([#1392](https://github.com/openai/openai-node/issues/1392)) ([164f476](https://github.com/openai/openai-node/commit/164f47606b41fd3e2850f8209eb1c6e2996a81ff)) + + +### Chores + +* add missing type alias exports ([#1390](https://github.com/openai/openai-node/issues/1390)) ([16c5e22](https://github.com/openai/openai-node/commit/16c5e2261c8c1a0ba96c2d5f475e8b1bc67387d7)) +* **internal:** add back release workflow ([dddf29b](https://github.com/openai/openai-node/commit/dddf29bd914a02d4586b239ec06217389a4409f9)) +* **internal:** remove CI condition ([#1381](https://github.com/openai/openai-node/issues/1381)) ([ef17981](https://github.com/openai/openai-node/commit/ef17981a0bd6b3e971986ece829c5d260d7392d4)) +* **internal:** run CI on update-specs branch ([9fc2130](https://github.com/openai/openai-node/commit/9fc2130b74a5919a3bbd41926903bdb310de4446)) +* **internal:** update release workflows ([90b77d0](https://github.com/openai/openai-node/commit/90b77d09c04d21487aa38fe775c79ae632136813)) + +## 4.87.3 (2025-03-11) + +Full Changelog: [v4.87.2...v4.87.3](https://github.com/openai/openai-node/compare/v4.87.2...v4.87.3) + +### Bug Fixes + +* **responses:** correct reasoning output type ([2abef57](https://github.com/openai/openai-node/commit/2abef57d7645a96a4b9a6b91483861cd568d2d4d)) + +## 4.87.2 (2025-03-11) + +Full Changelog: [v4.87.1...v4.87.2](https://github.com/openai/openai-node/compare/v4.87.1...v4.87.2) + +### Bug Fixes + +* **responses:** correctly add output_text ([4ceb5cc](https://github.com/openai/openai-node/commit/4ceb5cc516b8c75d46f0042534d7658796a8cd71)) + +## 4.87.1 (2025-03-11) + +Full Changelog: [v4.87.0...v4.87.1](https://github.com/openai/openai-node/compare/v4.87.0...v4.87.1) + +### Bug Fixes + +* correct imports ([5cdf17c](https://github.com/openai/openai-node/commit/5cdf17cec33da7cf540b8bdbcfa30c0c52842dd1)) + +## 4.87.0 (2025-03-11) + +Full Changelog: [v4.86.2...v4.87.0](https://github.com/openai/openai-node/compare/v4.86.2...v4.87.0) + +### Features + +* **api:** add /v1/responses and built-in tools ([119b584](https://github.com/openai/openai-node/commit/119b5843a18b8014167c8d2031d75c08dbf400a3)) + +## 4.86.2 (2025-03-05) + +Full Changelog: [v4.86.1...v4.86.2](https://github.com/openai/openai-node/compare/v4.86.1...v4.86.2) + +### Chores + +* **internal:** run example files in CI ([#1357](https://github.com/openai/openai-node/issues/1357)) ([88d0050](https://github.com/openai/openai-node/commit/88d0050336749deb3810b4cb43473de1f84e42bd)) + +## 4.86.1 (2025-02-27) + +Full Changelog: [v4.86.0...v4.86.1](https://github.com/openai/openai-node/compare/v4.86.0...v4.86.1) + +### Documentation + +* update URLs from stainlessapi.com to stainless.com ([#1352](https://github.com/openai/openai-node/issues/1352)) ([8294e9e](https://github.com/openai/openai-node/commit/8294e9ef57ed98722105b56d205ebea9d028f671)) + +## 4.86.0 (2025-02-27) + +Full Changelog: [v4.85.4...v4.86.0](https://github.com/openai/openai-node/compare/v4.85.4...v4.86.0) + +### Features + +* **api:** add gpt-4.5-preview ([#1349](https://github.com/openai/openai-node/issues/1349)) 
([2a1d36b](https://github.com/openai/openai-node/commit/2a1d36b560323fca058f98607775642370e90a47)) + +## 4.85.4 (2025-02-22) + +Full Changelog: [v4.85.3...v4.85.4](https://github.com/openai/openai-node/compare/v4.85.3...v4.85.4) + +### Chores + +* **internal:** fix devcontainers setup ([#1343](https://github.com/openai/openai-node/issues/1343)) ([cb1ec90](https://github.com/openai/openai-node/commit/cb1ec907832e325bc29abe94ae325e0477cb87d1)) + +## 4.85.3 (2025-02-20) + +Full Changelog: [v4.85.2...v4.85.3](https://github.com/openai/openai-node/compare/v4.85.2...v4.85.3) + +### Bug Fixes + +* **parsing:** remove tool_calls default empty array ([#1341](https://github.com/openai/openai-node/issues/1341)) ([2672160](https://github.com/openai/openai-node/commit/26721608e61949daa9592483e89b79230bb9198a)) + +## 4.85.2 (2025-02-18) + +Full Changelog: [v4.85.1...v4.85.2](https://github.com/openai/openai-node/compare/v4.85.1...v4.85.2) + +### Bug Fixes + +* optimize sse chunk reading off-by-one error ([#1339](https://github.com/openai/openai-node/issues/1339)) ([c82795b](https://github.com/openai/openai-node/commit/c82795b189c73d1c0e3bc3a40d0d4a2558b0483a)) + +## 4.85.1 (2025-02-14) + +Full Changelog: [v4.85.0...v4.85.1](https://github.com/openai/openai-node/compare/v4.85.0...v4.85.1) + +### Bug Fixes + +* **client:** fix export map for index exports ([#1328](https://github.com/openai/openai-node/issues/1328)) ([647ba7a](https://github.com/openai/openai-node/commit/647ba7a52311928f604c72b2cc95698c0837887f)) +* **package:** add chat/completions.ts back in ([#1333](https://github.com/openai/openai-node/issues/1333)) ([e4b5546](https://github.com/openai/openai-node/commit/e4b554632ab1646da831f29413fefb3378c49cc1)) + + +### Chores + +* **internal:** add missing return type annotation ([#1334](https://github.com/openai/openai-node/issues/1334)) ([53e0856](https://github.com/openai/openai-node/commit/53e0856ec4d36deee4d71b5aaf436df0a59b9402)) + +## 4.85.0 (2025-02-13) + +Full Changelog: [v4.84.1...v4.85.0](https://github.com/openai/openai-node/compare/v4.84.1...v4.85.0) + +### Features + +* **api:** add support for storing chat completions ([#1327](https://github.com/openai/openai-node/issues/1327)) ([8d77f8e](https://github.com/openai/openai-node/commit/8d77f8e3c4801b7fa1e7c6f50b48c1de1f43f3e6)) + + +### Bug Fixes + +* **realtime:** call .toString() on WebSocket url ([#1324](https://github.com/openai/openai-node/issues/1324)) ([09bc50d](https://github.com/openai/openai-node/commit/09bc50d439679b6acfd2441e69ee5aa18c00e5d9)) + +## 4.84.1 (2025-02-13) + +Full Changelog: [v4.84.0...v4.84.1](https://github.com/openai/openai-node/compare/v4.84.0...v4.84.1) + +### Bug Fixes + +* **realtime:** correct websocket type var constraint ([#1321](https://github.com/openai/openai-node/issues/1321)) ([afb17ea](https://github.com/openai/openai-node/commit/afb17ea6497b860ebbe5d8e68e4a97681dd307ff)) + +## 4.84.0 (2025-02-12) + +Full Changelog: [v4.83.0...v4.84.0](https://github.com/openai/openai-node/compare/v4.83.0...v4.84.0) + +### Features + +* **pagination:** avoid fetching when has_more: false ([#1305](https://github.com/openai/openai-node/issues/1305)) ([b6944c6](https://github.com/openai/openai-node/commit/b6944c634b53c9084f2ccf777c2491e89b2cc7af)) + + +### Bug Fixes + +* **api:** add missing reasoning effort + model enums ([#1302](https://github.com/openai/openai-node/issues/1302)) ([14c55c3](https://github.com/openai/openai-node/commit/14c55c312e31f1ed46d02f39a99049f785504a53)) +* **assistants:** handle 
`thread.run.incomplete` event ([7032cc4](https://github.com/openai/openai-node/commit/7032cc40b8aa0a58459cf114bceb8028a8517400)) +* correctly decode multi-byte characters over multiple chunks ([#1316](https://github.com/openai/openai-node/issues/1316)) ([dd776c4](https://github.com/openai/openai-node/commit/dd776c4867401f527f699bd4b9e567890256e849)) + + +### Chores + +* **internal:** remove segfault-handler dependency ([3521ca3](https://github.com/openai/openai-node/commit/3521ca34e7f5bd51542084e27c084a5d7cc5448b)) + + +### Documentation + +* **readme:** cleanup into multiple files ([da94424](https://github.com/openai/openai-node/commit/da944242e542e9e5e51cb11853c621fc6825ac02)) + +## 4.83.0 (2025-02-05) + +Full Changelog: [v4.82.0...v4.83.0](https://github.com/openai/openai-node/compare/v4.82.0...v4.83.0) + +### Features + +* **client:** send `X-Stainless-Timeout` header ([#1299](https://github.com/openai/openai-node/issues/1299)) ([ddfc686](https://github.com/openai/openai-node/commit/ddfc686f43a3420c3adf8dec2e82b4d10a121eb8)) + + +### Bug Fixes + +* **api/types:** correct audio duration & role types ([#1300](https://github.com/openai/openai-node/issues/1300)) ([a955ac2](https://github.com/openai/openai-node/commit/a955ac2bf5bee663d530d0c82b0005bf3ce6fc47)) +* **azure/audio:** use model param for deployments ([#1297](https://github.com/openai/openai-node/issues/1297)) ([85de382](https://github.com/openai/openai-node/commit/85de382db17cbe5f112650e79d0fc1cc841efbb2)) + +## 4.82.0 (2025-01-31) + +Full Changelog: [v4.81.0...v4.82.0](https://github.com/openai/openai-node/compare/v4.81.0...v4.82.0) + +### Features + +* **api:** add o3-mini ([#1295](https://github.com/openai/openai-node/issues/1295)) ([378e2f7](https://github.com/openai/openai-node/commit/378e2f7af62c570adb4c7644a4d49576b698de41)) + + +### Bug Fixes + +* **examples/realtime:** remove duplicate `session.update` call ([#1293](https://github.com/openai/openai-node/issues/1293)) ([ad800b4](https://github.com/openai/openai-node/commit/ad800b4f9410c6838994c24a3386ea708717f72b)) +* **types:** correct metadata type + other fixes ([378e2f7](https://github.com/openai/openai-node/commit/378e2f7af62c570adb4c7644a4d49576b698de41)) + +## 4.81.0 (2025-01-29) + +Full Changelog: [v4.80.1...v4.81.0](https://github.com/openai/openai-node/compare/v4.80.1...v4.81.0) + +### Features + +* **azure:** Realtime API support ([#1287](https://github.com/openai/openai-node/issues/1287)) ([fe090c0](https://github.com/openai/openai-node/commit/fe090c0a57570217eb0b431e2cce40bf61de2b75)) + +## 4.80.1 (2025-01-24) + +Full Changelog: [v4.80.0...v4.80.1](https://github.com/openai/openai-node/compare/v4.80.0...v4.80.1) + +### Bug Fixes + +* **azure:** include retry count header ([3e0ba40](https://github.com/openai/openai-node/commit/3e0ba409e57ce276fb1f95cd11c801e4ccaad572)) + + +### Documentation + +* fix typo, "zodFunctionTool" -> "zodFunction" ([#1128](https://github.com/openai/openai-node/issues/1128)) ([b7ab6bb](https://github.com/openai/openai-node/commit/b7ab6bb304973ade94830f37eb646e800226d5ef)) +* **helpers:** fix type annotation ([fc019df](https://github.com/openai/openai-node/commit/fc019df1d9cc276e8f8e689742853a09aa94991a)) +* **readme:** fix realtime errors docs link ([#1286](https://github.com/openai/openai-node/issues/1286)) ([d1d50c8](https://github.com/openai/openai-node/commit/d1d50c897c18cefea964e8057fe1acfd766ae2bf)) + +## 4.80.0 (2025-01-22) + +Full Changelog: [v4.79.4...v4.80.0](https://github.com/openai/openai-node/compare/v4.79.4...v4.80.0) 
+ +### Features + +* **api:** update enum values, comments, and examples ([#1280](https://github.com/openai/openai-node/issues/1280)) ([d38f2c2](https://github.com/openai/openai-node/commit/d38f2c2648b6990f217c3c7d83ca31f3739641d3)) + +## 4.79.4 (2025-01-21) + +Full Changelog: [v4.79.3...v4.79.4](https://github.com/openai/openai-node/compare/v4.79.3...v4.79.4) + +### Bug Fixes + +* **jsr:** correct zod config ([e45fa5f](https://github.com/openai/openai-node/commit/e45fa5f535ca74789636001e60e33edcad4db83c)) + + +### Chores + +* **internal:** minor restructuring ([#1278](https://github.com/openai/openai-node/issues/1278)) ([58ea92a](https://github.com/openai/openai-node/commit/58ea92a7464a04223f24ba31dbc0f7d0cf99cc19)) + + +### Documentation + +* update deprecation messages ([#1275](https://github.com/openai/openai-node/issues/1275)) ([1c6599e](https://github.com/openai/openai-node/commit/1c6599e47ef75a71cb309a1e14d97bc97bd036d0)) + +## 4.79.3 (2025-01-21) + +Full Changelog: [v4.79.2...v4.79.3](https://github.com/openai/openai-node/compare/v4.79.2...v4.79.3) + +### Bug Fixes + +* **jsr:** export zod helpers ([9dc55b6](https://github.com/openai/openai-node/commit/9dc55b62b564ad5ad1d4a60fe520b68235d05296)) + +## 4.79.2 (2025-01-21) + +Full Changelog: [v4.79.1...v4.79.2](https://github.com/openai/openai-node/compare/v4.79.1...v4.79.2) + +### Chores + +* **internal:** add test ([#1270](https://github.com/openai/openai-node/issues/1270)) ([b7c2d3d](https://github.com/openai/openai-node/commit/b7c2d3d9abd315f1452a578b0fd0d82e6ac4ff60)) + + +### Documentation + +* **readme:** fix Realtime API example link ([#1272](https://github.com/openai/openai-node/issues/1272)) ([d0653c7](https://github.com/openai/openai-node/commit/d0653c7fef48360d137a7411dfdfb95d477cdbc5)) + +## 4.79.1 (2025-01-17) + +Full Changelog: [v4.79.0...v4.79.1](https://github.com/openai/openai-node/compare/v4.79.0...v4.79.1) + +### Bug Fixes + +* **realtime:** correct import syntax ([#1267](https://github.com/openai/openai-node/issues/1267)) ([74702a7](https://github.com/openai/openai-node/commit/74702a739f566810d2b6c4e0832cfa17a1d1e272)) + +## 4.79.0 (2025-01-17) + +Full Changelog: [v4.78.1...v4.79.0](https://github.com/openai/openai-node/compare/v4.78.1...v4.79.0) + +### Features + +* **client:** add Realtime API support ([#1266](https://github.com/openai/openai-node/issues/1266)) ([7160ebe](https://github.com/openai/openai-node/commit/7160ebe647769fbf48a600c9961d1a6f86dc9622)) + + +### Bug Fixes + +* **logs/azure:** redact sensitive header when DEBUG is set ([#1218](https://github.com/openai/openai-node/issues/1218)) ([6a72fd7](https://github.com/openai/openai-node/commit/6a72fd736733db19504a829bf203b39d5b9e3644)) + + +### Chores + +* fix streaming ([379c743](https://github.com/openai/openai-node/commit/379c7435ed5d508458e9cdc22386039b84fcec5e)) +* **internal:** streaming refactors ([#1261](https://github.com/openai/openai-node/issues/1261)) ([dd4af93](https://github.com/openai/openai-node/commit/dd4af939792583854a313367c5fe2f98eea2f3c8)) +* **types:** add `| undefined` to client options properties ([#1264](https://github.com/openai/openai-node/issues/1264)) ([5e56979](https://github.com/openai/openai-node/commit/5e569799b9ac8f915b16de90d91d38b568c1edce)) +* **types:** rename vector store chunking strategy ([#1263](https://github.com/openai/openai-node/issues/1263)) ([d31acee](https://github.com/openai/openai-node/commit/d31acee860c80ba945d4e70b956c7ed75f5f849a)) + +## 4.78.1 (2025-01-10) + +Full Changelog: 
[v4.78.0...v4.78.1](https://github.com/openai/openai-node/compare/v4.78.0...v4.78.1) + +### Bug Fixes + +* send correct Accept header for certain endpoints ([#1257](https://github.com/openai/openai-node/issues/1257)) ([8756693](https://github.com/openai/openai-node/commit/8756693c5690b16045cdd8d33636fe7643d45f3a)) + +## 4.78.0 (2025-01-09) + +Full Changelog: [v4.77.4...v4.78.0](https://github.com/openai/openai-node/compare/v4.77.4...v4.78.0) + +### Features + +* **client:** add realtime types ([#1254](https://github.com/openai/openai-node/issues/1254)) ([7130995](https://github.com/openai/openai-node/commit/71309957a9a0883cac84b8b57697b796a9df3503)) + +## 4.77.4 (2025-01-08) + +Full Changelog: [v4.77.3...v4.77.4](https://github.com/openai/openai-node/compare/v4.77.3...v4.77.4) + +### Documentation + +* **readme:** fix misplaced period ([#1252](https://github.com/openai/openai-node/issues/1252)) ([c2fe465](https://github.com/openai/openai-node/commit/c2fe46522d59d1611ba8bb2b7e070f9be7264df0)) + +## 4.77.3 (2025-01-03) + +Full Changelog: [v4.77.2...v4.77.3](https://github.com/openai/openai-node/compare/v4.77.2...v4.77.3) + +### Chores + +* **api:** bump spec version ([#1248](https://github.com/openai/openai-node/issues/1248)) ([37b3df9](https://github.com/openai/openai-node/commit/37b3df9ac6af76fea6eace8307aab9f0565e5660)) + +## 4.77.2 (2025-01-02) + +Full Changelog: [v4.77.1...v4.77.2](https://github.com/openai/openai-node/compare/v4.77.1...v4.77.2) + +### Chores + +* bump license year ([#1246](https://github.com/openai/openai-node/issues/1246)) ([13197c1](https://github.com/openai/openai-node/commit/13197c1698f492529bd00b62d95f83c039ef0ac7)) + +## 4.77.1 (2024-12-21) + +Full Changelog: [v4.77.0...v4.77.1](https://github.com/openai/openai-node/compare/v4.77.0...v4.77.1) + +### Bug Fixes + +* **client:** normalize method ([#1235](https://github.com/openai/openai-node/issues/1235)) ([4a213da](https://github.com/openai/openai-node/commit/4a213dad6f2104dc02a75724acc62134d25db472)) + + +### Chores + +* **internal:** spec update ([#1231](https://github.com/openai/openai-node/issues/1231)) ([a97ea73](https://github.com/openai/openai-node/commit/a97ea73cafcb56e94be7ff691c4022da575cf60e)) + + +### Documentation + +* minor formatting changes ([#1236](https://github.com/openai/openai-node/issues/1236)) ([6387968](https://github.com/openai/openai-node/commit/63879681ccaca3dc1e17b27464e2f830b8f63b4f)) +* **readme:** add alpha callout ([f2eff37](https://github.com/openai/openai-node/commit/f2eff3780e1216f7f420f7b86d47f4e21986b10e)) + +## 4.77.0 (2024-12-17) + +Full Changelog: [v4.76.3...v4.77.0](https://github.com/openai/openai-node/compare/v4.76.3...v4.77.0) + +### Features + +* **api:** new o1 and GPT-4o models + preference fine-tuning ([#1229](https://github.com/openai/openai-node/issues/1229)) ([2e872d4](https://github.com/openai/openai-node/commit/2e872d4ac3717ab8f61741efffb7a31acd798338)) + + +### Chores + +* **internal:** fix some typos ([#1227](https://github.com/openai/openai-node/issues/1227)) ([d51fcfe](https://github.com/openai/openai-node/commit/d51fcfe3a66550a684eeeb0e6f17e1d9825cdf78)) +* **internal:** spec update ([#1230](https://github.com/openai/openai-node/issues/1230)) ([ed2b61d](https://github.com/openai/openai-node/commit/ed2b61d32703b64d9f91223bc02627a607f60483)) + +## 4.76.3 (2024-12-13) + +Full Changelog: [v4.76.2...v4.76.3](https://github.com/openai/openai-node/compare/v4.76.2...v4.76.3) + +### Chores + +* **internal:** better ecosystem test debugging 
([86fc0a8](https://github.com/openai/openai-node/commit/86fc0a81ede2780d3fcebaabff3d9fa9a36cc9c0)) + + +### Documentation + +* **README:** fix helpers section links ([#1224](https://github.com/openai/openai-node/issues/1224)) ([efbe30a](https://github.com/openai/openai-node/commit/efbe30a156cec1836d3db28f663066b33be57ba2)) + +## 4.76.2 (2024-12-12) + +Full Changelog: [v4.76.1...v4.76.2](https://github.com/openai/openai-node/compare/v4.76.1...v4.76.2) + +### Chores + +* **internal:** update isAbsoluteURL ([#1223](https://github.com/openai/openai-node/issues/1223)) ([e908ed7](https://github.com/openai/openai-node/commit/e908ed759996fb7706baf46d094fc77419423971)) +* **types:** nicer error class types + jsdocs ([#1219](https://github.com/openai/openai-node/issues/1219)) ([576d24c](https://github.com/openai/openai-node/commit/576d24cc4b3d766dfe28a6031bdc24ac1b711655)) + +## 4.76.1 (2024-12-10) + +Full Changelog: [v4.76.0...v4.76.1](https://github.com/openai/openai-node/compare/v4.76.0...v4.76.1) + +### Chores + +* **internal:** bump cross-spawn to v7.0.6 ([#1217](https://github.com/openai/openai-node/issues/1217)) ([c07ad29](https://github.com/openai/openai-node/commit/c07ad298d58e5aeaf816ee3de65fd59bf3fc8b66)) +* **internal:** remove unnecessary getRequestClient function ([#1215](https://github.com/openai/openai-node/issues/1215)) ([bef3925](https://github.com/openai/openai-node/commit/bef392526cd339f45c574bc476649c77be36c612)) + +## 4.76.0 (2024-12-05) + +Full Changelog: [v4.75.0...v4.76.0](https://github.com/openai/openai-node/compare/v4.75.0...v4.76.0) + +### Features + +* **api:** updates ([#1212](https://github.com/openai/openai-node/issues/1212)) ([e0fedf2](https://github.com/openai/openai-node/commit/e0fedf2c5a91d0c03d8dad6854b366f77eab4923)) + + +### Chores + +* bump openapi url ([#1210](https://github.com/openai/openai-node/issues/1210)) ([3fa95a4](https://github.com/openai/openai-node/commit/3fa95a429d4b2adecce35a7b96b73f6d5e88eeeb)) + +## 4.75.0 (2024-12-03) + +Full Changelog: [v4.74.0...v4.75.0](https://github.com/openai/openai-node/compare/v4.74.0...v4.75.0) + +### Features + +* improve docs for jsr README.md ([#1208](https://github.com/openai/openai-node/issues/1208)) ([338527e](https://github.com/openai/openai-node/commit/338527e40361e2de899a63f280d4ec2db5e87f3c)) + +## 4.74.0 (2024-12-02) + +Full Changelog: [v4.73.1...v4.74.0](https://github.com/openai/openai-node/compare/v4.73.1...v4.74.0) + +### Features + +* **internal:** make git install file structure match npm ([#1204](https://github.com/openai/openai-node/issues/1204)) ([e7c4c6d](https://github.com/openai/openai-node/commit/e7c4c6d23adbe52300053a8d35db6e341c438703)) + +## 4.73.1 (2024-11-25) + +Full Changelog: [v4.73.0...v4.73.1](https://github.com/openai/openai-node/compare/v4.73.0...v4.73.1) + +### Documentation + +* **readme:** mention `.withResponse()` for streaming request ID ([#1202](https://github.com/openai/openai-node/issues/1202)) ([b6800d4](https://github.com/openai/openai-node/commit/b6800d4dea2729fe3b0864171ce8fb3b2cc1b21c)) + +## 4.73.0 (2024-11-20) + +Full Changelog: [v4.72.0...v4.73.0](https://github.com/openai/openai-node/compare/v4.72.0...v4.73.0) + +### Features + +* **api:** add gpt-4o-2024-11-20 model ([#1201](https://github.com/openai/openai-node/issues/1201)) ([0feeafd](https://github.com/openai/openai-node/commit/0feeafd21ba4b6281cc3b9dafa2919b1e2e4d1c3)) +* bump model in all example snippets to gpt-4o 
([6961c37](https://github.com/openai/openai-node/commit/6961c37f2e581bcc12ec2bbe77df2b9b260fe297)) + + +### Bug Fixes + +* **docs:** add missing await to pagination example ([#1190](https://github.com/openai/openai-node/issues/1190)) ([524b9e8](https://github.com/openai/openai-node/commit/524b9e82ae13a3b5093dcfbfd1169a798cf99ab4)) + + +### Chores + +* **client:** drop unused devDependency ([#1191](https://github.com/openai/openai-node/issues/1191)) ([8ee6c03](https://github.com/openai/openai-node/commit/8ee6c0335673f2ecf84ea11bdfc990adab607e20)) +* **internal:** spec update ([#1195](https://github.com/openai/openai-node/issues/1195)) ([12f9334](https://github.com/openai/openai-node/commit/12f93346857196b93f94865cc3744d769e5e519c)) +* **internal:** use reexports not destructuring ([#1181](https://github.com/openai/openai-node/issues/1181)) ([f555dd6](https://github.com/openai/openai-node/commit/f555dd6503bc4ccd4d13f4e1a1d36fbbfd51c369)) + + +### Documentation + +* bump models in example snippets to gpt-4o ([#1184](https://github.com/openai/openai-node/issues/1184)) ([4ec4027](https://github.com/openai/openai-node/commit/4ec402790cf3cfbccbf3ef9b61d577b0118977e8)) +* change readme title ([#1198](https://github.com/openai/openai-node/issues/1198)) ([e34981c](https://github.com/openai/openai-node/commit/e34981c00f2f0360baffe870bcc38786030671bf)) +* improve jsr documentation ([#1197](https://github.com/openai/openai-node/issues/1197)) ([ebdb4f7](https://github.com/openai/openai-node/commit/ebdb4f72cc01afbee649aca009fdaf413e61c507)) +* **readme:** fix incorrect fileBatches.uploadAndPoll params ([#1200](https://github.com/openai/openai-node/issues/1200)) ([3968ef1](https://github.com/openai/openai-node/commit/3968ef1c4fa860ff246e0e803808752b261c18ce)) + +## 4.72.0 (2024-11-12) + +Full Changelog: [v4.71.1...v4.72.0](https://github.com/openai/openai-node/compare/v4.71.1...v4.72.0) + +### Features + +* add back deno runtime testing without type checks ([1626cf5](https://github.com/openai/openai-node/commit/1626cf57e94706e1fc8b2f9ff4f173fe486d5150)) + + +### Chores + +* **ecosystem-tests:** bump wrangler version ([#1178](https://github.com/openai/openai-node/issues/1178)) ([4dfb0c6](https://github.com/openai/openai-node/commit/4dfb0c6aa7c4530665bc7d6beebcd04aa1490e27)) + +## 4.71.1 (2024-11-06) + +Full Changelog: [v4.71.0...v4.71.1](https://github.com/openai/openai-node/compare/v4.71.0...v4.71.1) + +### Bug Fixes + +* change release please configuration for jsr.json ([#1174](https://github.com/openai/openai-node/issues/1174)) ([c39efba](https://github.com/openai/openai-node/commit/c39efba812209c8906315596cc0a56e54ae8590a)) + +## 4.71.0 (2024-11-04) + +Full Changelog: [v4.70.3...v4.71.0](https://github.com/openai/openai-node/compare/v4.70.3...v4.71.0) + +### Features + +* **api:** add support for predicted outputs ([#1172](https://github.com/openai/openai-node/issues/1172)) ([08a7bb4](https://github.com/openai/openai-node/commit/08a7bb4d4b751aeed9655bfcb9fa27fc79a767c4)) + +## 4.70.3 (2024-11-04) + +Full Changelog: [v4.70.2...v4.70.3](https://github.com/openai/openai-node/compare/v4.70.2...v4.70.3) + +### Bug Fixes + +* change streaming helper imports to be relative ([e73b7cf](https://github.com/openai/openai-node/commit/e73b7cf84272bd02a39a67795d49db23db2d970f)) + +## 4.70.2 (2024-11-01) + +Full Changelog: [v4.70.1...v4.70.2](https://github.com/openai/openai-node/compare/v4.70.1...v4.70.2) + +### Bug Fixes + +* add permissions to github workflow 
([ee75e00](https://github.com/openai/openai-node/commit/ee75e00b0fbf82553b219ee8948a8077e9c26a24)) +* skip deno ecosystem test ([5b181b0](https://github.com/openai/openai-node/commit/5b181b01b62139f8da35d426914c82b8425af141)) + +## 4.70.1 (2024-11-01) + +Full Changelog: [v4.70.0...v4.70.1](https://github.com/openai/openai-node/compare/v4.70.0...v4.70.1) + +### Bug Fixes + +* don't require deno to run build-deno ([#1167](https://github.com/openai/openai-node/issues/1167)) ([9d857bc](https://github.com/openai/openai-node/commit/9d857bc531a0bb3939f7660e49b31ccc38f60dd3)) + +## 4.70.0 (2024-11-01) + +Full Changelog: [v4.69.0...v4.70.0](https://github.com/openai/openai-node/compare/v4.69.0...v4.70.0) + +### Features + +* publish to jsr ([#1165](https://github.com/openai/openai-node/issues/1165)) ([5aa93a7](https://github.com/openai/openai-node/commit/5aa93a7fe704ef1ad077787852db38dc29104534)) + + +### Chores + +* **internal:** fix isolated modules exports ([9cd1958](https://github.com/openai/openai-node/commit/9cd19584dcc6f4004ea1adcee917aa88a37d5f1c)) + + +### Refactors + +* use type imports for type-only imports ([#1159](https://github.com/openai/openai-node/issues/1159)) ([07bbaf6](https://github.com/openai/openai-node/commit/07bbaf6ecac9a5e36471a35488020853ddf9214f)) + +## 4.69.0 (2024-10-30) + +Full Changelog: [v4.68.4...v4.69.0](https://github.com/openai/openai-node/compare/v4.68.4...v4.69.0) + +### Features + +* **api:** add new, expressive voices for Realtime and Audio in Chat Completions ([#1157](https://github.com/openai/openai-node/issues/1157)) ([12e501c](https://github.com/openai/openai-node/commit/12e501c8a215a2af29b9b8fceedc5935b6f2feef)) + + +### Bug Fixes + +* **internal:** support pnpm git installs ([#1156](https://github.com/openai/openai-node/issues/1156)) ([b744c5b](https://github.com/openai/openai-node/commit/b744c5b609533e9a6694d6cae0425fe9cd37e26c)) + + +### Documentation + +* **readme:** minor typo fixes ([#1154](https://github.com/openai/openai-node/issues/1154)) ([c6c9f9a](https://github.com/openai/openai-node/commit/c6c9f9aaf75f643016ad73574a7e24a228b5c60f)) + +## 4.68.4 (2024-10-23) + +Full Changelog: [v4.68.3...v4.68.4](https://github.com/openai/openai-node/compare/v4.68.3...v4.68.4) + +### Chores + +* **internal:** update spec version ([#1146](https://github.com/openai/openai-node/issues/1146)) ([0165a8d](https://github.com/openai/openai-node/commit/0165a8d79340ede49557e05fd00d6fff9d69d930)) + +## 4.68.3 (2024-10-23) + +Full Changelog: [v4.68.2...v4.68.3](https://github.com/openai/openai-node/compare/v4.68.2...v4.68.3) + +### Chores + +* **internal:** bumps eslint and related dependencies ([#1143](https://github.com/openai/openai-node/issues/1143)) ([2643f42](https://github.com/openai/openai-node/commit/2643f42a36208c36daf23470ffcd227a891284eb)) + +## 4.68.2 (2024-10-22) + +Full Changelog: [v4.68.1...v4.68.2](https://github.com/openai/openai-node/compare/v4.68.1...v4.68.2) + +### Chores + +* **internal:** update spec version ([#1141](https://github.com/openai/openai-node/issues/1141)) ([2ccb3e3](https://github.com/openai/openai-node/commit/2ccb3e357aa2f3eb0fa32c619d8336c3b94cc882)) + +## 4.68.1 (2024-10-18) + +Full Changelog: [v4.68.0...v4.68.1](https://github.com/openai/openai-node/compare/v4.68.0...v4.68.1) + +### Bug Fixes + +* **client:** respect x-stainless-retry-count default headers ([#1138](https://github.com/openai/openai-node/issues/1138)) ([266717b](https://github.com/openai/openai-node/commit/266717b3301828c7df735064a380a055576183bc)) + +## 4.68.0 
(2024-10-17)
+
+Full Changelog: [v4.67.3...v4.68.0](https://github.com/openai/openai-node/compare/v4.67.3...v4.68.0)
+
+### Features
+
+* **api:** add gpt-4o-audio-preview model for chat completions ([#1135](https://github.com/openai/openai-node/issues/1135)) ([17a623f](https://github.com/openai/openai-node/commit/17a623f70050bca4538ad2939055cd9d9b165f89))
+
 ## 4.67.3 (2024-10-08)

 Full Changelog: [v4.67.2...v4.67.3](https://github.com/openai/openai-node/compare/v4.67.2...v4.67.3)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e8bbc1b07..dde09d52d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
 ## Setting up the environment

-This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install/#mac-stable).
+This repository uses [`yarn@v1`](https://classic.yarnpkg.com/lang/en/docs/install).
 Other package managers may work but are not officially supported for development.

 To set up the repository, run:
@@ -29,10 +29,10 @@ All files in the `examples/` directory are not modified by the generator and can …
 ```

-```
-chmod +x examples/<your-example>.ts
-# run the example against your api
-yarn tsn -T examples/<your-example>.ts
-```
+```sh
+$ chmod +x examples/<your-example>.ts
+# run the example against your api
+$ yarn tsn -T examples/<your-example>.ts
+```

 ## Using the repository from source
diff --git a/LICENSE b/LICENSE
index 621a6becf..f011417af 100644
--- a/LICENSE
+++ b/LICENSE
@@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within
       third-party archives.

-   Copyright 2024 OpenAI
+   Copyright 2025 OpenAI

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/README.md b/README.md
index 407933634..bbf72226a 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-# OpenAI Node API Library
+# OpenAI TypeScript and JavaScript API Library

-[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai)
+[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai)

 This library provides convenient access to the OpenAI REST API from TypeScript or JavaScript.
@@ -14,69 +14,25 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo
 npm install openai
 ```

-You can import in Deno via:
+### Installation from JSR

-
-```ts
-import OpenAI from 'https://deno.land/x/openai@v4.67.3/mod.ts';
-```
-
-
-## Usage
-
-The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API.
-
-```js
-import OpenAI from 'openai';
-
-const client = new OpenAI({
-  apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
-});
-
-async function main() {
-  const chatCompletion = await client.chat.completions.create({
-    messages: [{ role: 'user', content: 'Say this is a test' }],
-    model: 'gpt-3.5-turbo',
-  });
-}
-
-main();
+```sh
+deno add jsr:@openai/openai
+npx jsr add @openai/openai
 ```

-## Streaming responses
-
-We provide support for streaming responses using Server Sent Events (SSE).
+These commands will make the module importable from the `@openai/openai` scope.
You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: ```ts -import OpenAI from 'openai'; - -const client = new OpenAI(); - -async function main() { - const stream = await client.chat.completions.create({ - model: 'gpt-4', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } -} - -main(); +import OpenAI from 'jsr:@openai/openai'; ``` -If you need to cancel a stream, you can `break` from the loop -or call `stream.controller.abort()`. +## Usage -### Request & Response types +The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). -This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: +The primary API for interacting with OpenAI models is the [Responses API](https://platform.openai.com/docs/api-reference/responses). You can generate text from the model with the code below. - ```ts import OpenAI from 'openai'; @@ -84,209 +40,55 @@ const client = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted }); -async function main() { - const params: OpenAI.Chat.ChatCompletionCreateParams = { - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-3.5-turbo', - }; - const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); -} - -main(); -``` - -Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. - -> [!IMPORTANT] -> Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217). - -### Polling Helpers - -When interacting with the API some actions such as starting a Run and adding files to vector stores are asynchronous and take time to complete. The SDK includes -helper functions which will poll the status until it reaches a terminal state and then return the resulting object. -If an API method results in an action which could benefit from polling there will be a corresponding version of the -method ending in 'AndPoll'. - -For instance to create a Run and poll until it reaches a terminal state you can run: - -```ts -const run = await openai.beta.threads.runs.createAndPoll(thread.id, { - assistant_id: assistantId, +const response = await client.responses.create({ + model: 'gpt-4o', + instructions: 'You are a coding assistant that talks like a pirate', + input: 'Are semicolons optional in JavaScript?', }); -``` - -More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/deep-dive/run-lifecycle) - -### Bulk Upload Helpers -When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations. -For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. - -```ts -const fileList = [ - createReadStream('/home/data/example.pdf'), - ... 
-]; - -const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, fileList); +console.log(response.output_text); ``` -### Streaming Helpers - -The SDK also includes helpers to process streams and handle the incoming events. - -```ts -const run = openai.beta.threads.runs - .stream(thread.id, { - assistant_id: assistant.id, - }) - .on('textCreated', (text) => process.stdout.write('\nassistant > ')) - .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value)) - .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`)) - .on('toolCallDelta', (toolCallDelta, snapshot) => { - if (toolCallDelta.type === 'code_interpreter') { - if (toolCallDelta.code_interpreter.input) { - process.stdout.write(toolCallDelta.code_interpreter.input); - } - if (toolCallDelta.code_interpreter.outputs) { - process.stdout.write('\noutput >\n'); - toolCallDelta.code_interpreter.outputs.forEach((output) => { - if (output.type === 'logs') { - process.stdout.write(`\n${output.logs}\n`); - } - }); - } - } - }); -``` - -More information on streaming helpers can be found in the dedicated documentation: [helpers.md](helpers.md) - -### Streaming responses - -This library provides several conveniences for streaming chat completions, for example: +The previous standard (supported indefinitely) for generating text is the [Chat Completions API](https://platform.openai.com/docs/api-reference/chat). You can use that API to generate text from the model with the code below. ```ts import OpenAI from 'openai'; -const openai = new OpenAI(); - -async function main() { - const stream = await openai.beta.chat.completions.stream({ - model: 'gpt-4', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - - stream.on('content', (delta, snapshot) => { - process.stdout.write(delta); - }); - - // or, equivalently: - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } +const client = new OpenAI({ + apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +}); - const chatCompletion = await stream.finalChatCompletion(); - console.log(chatCompletion); // {id: "…", choices: […], …} -} +const completion = await client.chat.completions.create({ + model: 'gpt-4o', + messages: [ + { role: 'developer', content: 'Talk like a pirate.' }, + { role: 'user', content: 'Are semicolons optional in JavaScript?' }, + ], +}); -main(); +console.log(completion.choices[0].message.content); ``` -Streaming with `openai.beta.chat.completions.stream({…})` exposes -[various helpers for your convenience](helpers.md#events) including event handlers and promises. - -Alternatively, you can use `openai.chat.completions.create({ stream: true, … })` -which only returns an async iterable of the chunks in the stream and thus uses less memory -(it does not build up a final chat completion object for you). - -If you need to cancel a stream, you can `break` from a `for await` loop or call `stream.abort()`. - -### Automated function calls - -We provide the `openai.beta.chat.completions.runTools({…})` -convenience helper for using function tool calls with the `/chat/completions` endpoint -which automatically call the JavaScript functions you provide -and sends their results back to the `/chat/completions` endpoint, -looping as long as the model requests tool calls. 
- -If you pass a `parse` function, it will automatically parse the `arguments` for you -and returns any parsing errors to the model to attempt auto-recovery. -Otherwise, the args will be passed to the function you provide as a string. +## Streaming responses -If you pass `tool_choice: {function: {name: …}}` instead of `auto`, -it returns immediately after calling that function (and only loops to auto-recover parsing errors). +We provide support for streaming responses using Server Sent Events (SSE). ```ts import OpenAI from 'openai'; const client = new OpenAI(); -async function main() { - const runner = client.beta.chat.completions - .runTools({ - model: 'gpt-3.5-turbo', - messages: [{ role: 'user', content: 'How is the weather this week?' }], - tools: [ - { - type: 'function', - function: { - function: getCurrentLocation, - parameters: { type: 'object', properties: {} }, - }, - }, - { - type: 'function', - function: { - function: getWeather, - parse: JSON.parse, // or use a validation library like zod for typesafe parsing. - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - }, - }, - }, - ], - }) - .on('message', (message) => console.log(message)); - - const finalContent = await runner.finalContent(); - console.log(); - console.log('Final content:', finalContent); -} - -async function getCurrentLocation() { - return 'Boston'; // Simulate lookup -} +const stream = await client.responses.create({ + model: 'gpt-4o', + input: 'Say "Sheep sleep deep" ten times fast!', + stream: true, +}); -async function getWeather(args: { location: string }) { - const { location } = args; - // … do lookup … - return { temperature, precipitation }; +for await (const event of stream) { + console.log(event); } - -main(); - -// {role: "user", content: "How's the weather this week?"} -// {role: "assistant", tool_calls: [{type: "function", function: {name: "getCurrentLocation", arguments: "{}"}, id: "123"} -// {role: "tool", name: "getCurrentLocation", content: "Boston", tool_call_id: "123"} -// {role: "assistant", tool_calls: [{type: "function", function: {name: "getWeather", arguments: '{"location": "Boston"}'}, id: "1234"}]} -// {role: "tool", name: "getWeather", content: '{"temperature": "50degF", "preciptation": "high"}', tool_call_id: "1234"} -// {role: "assistant", content: "It's looking cold and rainy - you might want to wear a jacket!"} -// -// Final content: "It's looking cold and rainy - you might want to wear a jacket!" ``` -Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#events). - -Note that `runFunctions` was previously available as well, but has been deprecated in favor of `runTools`. - -Read more about various examples such as with integrating with [zod](helpers.md#integrate-with-zod), -[next.js](helpers.md#integrate-wtih-next-js), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser). 
-
 ## File uploads

 Request parameters that correspond to file uploads can be passed in many different forms:
@@ -333,9 +135,10 @@ a subclass of `APIError` will be thrown:

 ```ts
 async function main() {
   const job = await client.fineTuning.jobs
-    .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
+    .create({ model: 'gpt-4o', training_file: 'file-abc123' })
     .catch(async (err) => {
       if (err instanceof OpenAI.APIError) {
+        console.log(err.request_id);
         console.log(err.status); // 400
         console.log(err.name); // BadRequestError
         console.log(err.headers); // {server: 'nginx', ...}
@@ -348,7 +151,7 @@ async function main() {
 main();
 ```

-Error codes are as followed:
+Error codes are as follows:

 | Status Code | Error Type                 |
 | ----------- | -------------------------- |
@@ -361,44 +164,6 @@ Error codes are as followed:
 | >=500       | `InternalServerError`      |
 | N/A         | `APIConnectionError`       |

-## Request IDs
-
-> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests)
-
-All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI.
-
-```ts
-const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4' });
-console.log(completion._request_id) // req_123
-```
-
-## Microsoft Azure OpenAI
-
-To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI`
-class instead of the `OpenAI` class.
-
-> [!IMPORTANT]
-> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params
-> won't always be correct.
-
-```ts
-import { AzureOpenAI } from 'openai';
-import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity';
-
-const credential = new DefaultAzureCredential();
-const scope = 'https://cognitiveservices.azure.com/.default';
-const azureADTokenProvider = getBearerTokenProvider(credential, scope);
-
-const openai = new AzureOpenAI({ azureADTokenProvider });
-
-const result = await openai.chat.completions.create({
-  model: 'gpt-4-1106-preview',
-  messages: [{ role: 'user', content: 'Say hello!' }],
-});
-
-console.log(result.choices[0]!.message?.content);
-```
-
 ### Retries

 Certain errors will be automatically retried 2 times by default, with a short exponential backoff.
@@ -415,7 +180,7 @@ const client = new OpenAI({
 });

 // Or, configure per-request:
-await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in JavaScript?' }], model: 'gpt-4o' }, {
   maxRetries: 5,
 });
 ```
@@ -432,7 +197,7 @@ const client = new OpenAI({
 });

 // Override per-request:
-await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-4o' }, {
   timeout: 5 * 1000,
 });
 ```
@@ -441,10 +206,33 @@ On timeout, an `APIConnectionTimeoutError` is thrown.

 Note that requests which time out will be [retried twice by default](#retries).
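As a sketch of handling that error explicitly (this assumes `APIConnectionTimeoutError` is exported on the `OpenAI` namespace alongside the error classes in the table above):

```ts
import OpenAI from 'openai';

const client = new OpenAI({ timeout: 5 * 1000 }); // 5 seconds, as in the example above

try {
  await client.chat.completions.create({
    messages: [{ role: 'user', content: 'Say this is a test' }],
    model: 'gpt-4o',
  });
} catch (err) {
  if (err instanceof OpenAI.APIConnectionTimeoutError) {
    // the request timed out (after the two automatic retries noted above)
    console.error('Request timed out; consider raising `timeout` or `maxRetries`.');
  } else {
    throw err;
  }
}
```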
+## Request IDs
+
+> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests)
+
+All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI.
+
+```ts
+const response = await client.responses.create({ model: 'gpt-4o', input: 'testing 123' });
+console.log(response._request_id) // req_123
+```
+
+You can also access the Request ID using the `.withResponse()` method:
+
+```ts
+const { data: stream, request_id } = await client.responses
+  .create({
+    model: 'gpt-4o',
+    input: 'Say this is a test',
+    stream: true,
+  })
+  .withResponse();
+```
+
 ## Auto-pagination

 List methods in the OpenAI API are paginated.
-You can use `for await … of` syntax to iterate through items across all pages:
+You can use the `for await … of` syntax to iterate through items across all pages:

 ```ts
 async function fetchAllFineTuningJobs(params) {
@@ -457,7 +245,7 @@ async function fetchAllFineTuningJobs(params) {
   }
 }
 ```

-Alternatively, you can make request a single page at a time:
+Alternatively, you can request a single page at a time:

 ```ts
 let page = await client.fineTuning.jobs.list({ limit: 20 });
@@ -467,11 +255,54 @@ for (const fineTuningJob of page.data) {

 // Convenience methods are provided for manually paginating:
 while (page.hasNextPage()) {
-  page = page.getNextPage();
+  page = await page.getNextPage();
   // ...
 }
 ```

+## Realtime API Beta
+
+The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection.
+
+```ts
+import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket';
+
+const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' });
+
+rt.on('response.text.delta', (event) => process.stdout.write(event.delta));
+```
+
+For more information see [realtime.md](realtime.md).
+
+## Microsoft Azure OpenAI
+
+To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI`
+class instead of the `OpenAI` class.
+
+> [!IMPORTANT]
+> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params
+> won't always be correct.
+
+```ts
+import { AzureOpenAI } from 'openai';
+import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity';
+
+const credential = new DefaultAzureCredential();
+const scope = 'https://cognitiveservices.azure.com/.default';
+const azureADTokenProvider = getBearerTokenProvider(credential, scope);
+
+const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" });
+
+const result = await openai.chat.completions.create({
+  model: 'gpt-4o',
+  messages: [{ role: 'user', content: 'Say hello!' }],
+});
+
+console.log(result.choices[0]!.message?.content);
+```
+
+For more information on support for the Azure API, see [azure.md](azure.md).
+ ## Advanced Usage ### Accessing raw Response data (e.g., headers) @@ -484,17 +315,19 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi ```ts const client = new OpenAI(); -const response = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) +const httpResponse = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .asResponse(); -console.log(response.headers.get('X-My-Header')); -console.log(response.statusText); // access the underlying Response object -const { data: chatCompletion, response: raw } = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' }) +// access the underlying web standard Response object +console.log(httpResponse.headers.get('X-My-Header')); +console.log(httpResponse.statusText); + +const { data: modelResponse, response: raw } = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); -console.log(chatCompletion); +console.log(modelResponse); ``` ### Making custom/undocumented requests @@ -543,6 +376,11 @@ validate or strip extra properties from the response from the API. ### Customizing the fetch client +> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. +> +> Please try it out and let us know if you run into any issues! +> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 + By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments. If you would prefer to use a global, web-standards-compliant `fetch` function even in a Node environment, @@ -608,7 +446,7 @@ await client.models.list({ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: 1. Changes that only affect static types, without breaking runtime behavior. -2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_. +2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals.)_ 3. Changes that we do not expect to impact the vast majority of users in practice. We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. @@ -622,7 +460,7 @@ TypeScript >= 4.5 is supported. The following runtimes are supported: - Node.js 18 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions. -- Deno v1.28.0 or higher, using `import OpenAI from "npm:openai"`. +- Deno v1.28.0 or higher. - Bun 1.0 or later. - Cloudflare Workers. - Vercel Edge Runtime. diff --git a/SECURITY.md b/SECURITY.md index c54acaf33..3b3bd8a66 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,9 +2,9 @@ ## Reporting Security Issues -This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. +This SDK is generated by [Stainless Software Inc](http://stainless.com). 
Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. -To report a security issue, please contact the Stainless team at security@stainlessapi.com. +To report a security issue, please contact the Stainless team at security@stainless.com. ## Responsible Disclosure diff --git a/api.md b/api.md index 71027acfd..cad696e7e 100644 --- a/api.md +++ b/api.md @@ -2,12 +2,20 @@ Types: +- AllModels +- ChatModel +- ComparisonFilter +- CompoundFilter - ErrorObject - FunctionDefinition - FunctionParameters +- Metadata +- Reasoning +- ReasoningEffort - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText +- ResponsesModel # Completions @@ -31,32 +39,51 @@ Types: Types: -- ChatCompletion -- ChatCompletionAssistantMessageParam -- ChatCompletionChunk -- ChatCompletionContentPart -- ChatCompletionContentPartImage -- ChatCompletionContentPartRefusal -- ChatCompletionContentPartText -- ChatCompletionFunctionCallOption -- ChatCompletionFunctionMessageParam -- ChatCompletionMessage -- ChatCompletionMessageParam -- ChatCompletionMessageToolCall -- ChatCompletionNamedToolChoice -- ChatCompletionRole -- ChatCompletionStreamOptions -- ChatCompletionSystemMessageParam -- ChatCompletionTokenLogprob -- ChatCompletionTool -- ChatCompletionToolChoiceOption -- ChatCompletionToolMessageParam -- ChatCompletionUserMessageParam -- CreateChatCompletionRequestMessage +- ChatCompletion +- ChatCompletionAssistantMessageParam +- ChatCompletionAudio +- ChatCompletionAudioParam +- ChatCompletionChunk +- ChatCompletionContentPart +- ChatCompletionContentPartImage +- ChatCompletionContentPartInputAudio +- ChatCompletionContentPartRefusal +- ChatCompletionContentPartText +- ChatCompletionDeleted +- ChatCompletionDeveloperMessageParam +- ChatCompletionFunctionCallOption +- ChatCompletionFunctionMessageParam +- ChatCompletionMessage +- ChatCompletionMessageParam +- ChatCompletionMessageToolCall +- ChatCompletionModality +- ChatCompletionNamedToolChoice +- ChatCompletionPredictionContent +- ChatCompletionRole +- ChatCompletionStoreMessage +- ChatCompletionStreamOptions +- ChatCompletionSystemMessageParam +- ChatCompletionTokenLogprob +- ChatCompletionTool +- ChatCompletionToolChoiceOption +- ChatCompletionToolMessageParam +- ChatCompletionUserMessageParam +- CreateChatCompletionRequestMessage +- ChatCompletionReasoningEffort Methods: -- client.chat.completions.create({ ...params }) -> ChatCompletion +- client.chat.completions.create({ ...params }) -> ChatCompletion +- client.chat.completions.retrieve(completionId) -> ChatCompletion +- client.chat.completions.update(completionId, { ...params }) -> ChatCompletion +- client.chat.completions.list({ ...params }) -> ChatCompletionsPage +- client.chat.completions.del(completionId) -> ChatCompletionDeleted + +### Messages + +Methods: + +- client.chat.completions.messages.list(completionId, { ...params }) -> ChatCompletionStoreMessagesPage # Embeddings @@ -115,7 +142,11 @@ Types: Types: - Transcription +- TranscriptionInclude - TranscriptionSegment +- TranscriptionStreamEvent +- TranscriptionTextDeltaEvent +- TranscriptionTextDoneEvent - TranscriptionVerbose - TranscriptionWord - TranscriptionCreateResponse @@ -176,6 +207,17 @@ Methods: # FineTuning +## Methods + +Types: + +- DpoHyperparameters +- DpoMethod +- ReinforcementHyperparameters +- ReinforcementMethod +- SupervisedHyperparameters +- SupervisedMethod + ## Jobs Types: @@ -193,6 +235,8 @@ Methods: - client.fineTuning.jobs.list({ 
...params }) -> FineTuningJobsPage - client.fineTuning.jobs.cancel(fineTuningJobId) -> FineTuningJob - client.fineTuning.jobs.listEvents(fineTuningJobId, { ...params }) -> FineTuningJobEventsPage +- client.fineTuning.jobs.pause(fineTuningJobId) -> FineTuningJob +- client.fineTuning.jobs.resume(fineTuningJobId) -> FineTuningJob ### Checkpoints @@ -204,73 +248,186 @@ Methods: - client.fineTuning.jobs.checkpoints.list(fineTuningJobId, { ...params }) -> FineTuningJobCheckpointsPage -# Beta +## Checkpoints + +### Permissions + +Types: + +- PermissionCreateResponse +- PermissionRetrieveResponse +- PermissionDeleteResponse + +Methods: + +- client.fineTuning.checkpoints.permissions.create(fineTunedModelCheckpoint, { ...params }) -> PermissionCreateResponsesPage +- client.fineTuning.checkpoints.permissions.retrieve(fineTunedModelCheckpoint, { ...params }) -> PermissionRetrieveResponse +- client.fineTuning.checkpoints.permissions.del(fineTunedModelCheckpoint, permissionId) -> PermissionDeleteResponse + +## Alpha + +### Graders + +Types: + +- GraderRunResponse +- GraderValidateResponse + +Methods: + +- client.fineTuning.alpha.graders.run({ ...params }) -> GraderRunResponse +- client.fineTuning.alpha.graders.validate({ ...params }) -> GraderValidateResponse + +# Graders + +## GraderModels + +Types: + +- LabelModelGrader +- MultiGrader +- PythonGrader +- ScoreModelGrader +- StringCheckGrader +- TextSimilarityGrader -## VectorStores +# VectorStores Types: -- AutoFileChunkingStrategyParam -- FileChunkingStrategy -- FileChunkingStrategyParam -- OtherFileChunkingStrategyObject -- StaticFileChunkingStrategy -- StaticFileChunkingStrategyObject -- StaticFileChunkingStrategyParam -- VectorStore -- VectorStoreDeleted +- AutoFileChunkingStrategyParam +- FileChunkingStrategy +- FileChunkingStrategyParam +- OtherFileChunkingStrategyObject +- StaticFileChunkingStrategy +- StaticFileChunkingStrategyObject +- StaticFileChunkingStrategyObjectParam +- VectorStore +- VectorStoreDeleted +- VectorStoreSearchResponse Methods: -- client.beta.vectorStores.create({ ...params }) -> VectorStore -- client.beta.vectorStores.retrieve(vectorStoreId) -> VectorStore -- client.beta.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore -- client.beta.vectorStores.list({ ...params }) -> VectorStoresPage -- client.beta.vectorStores.del(vectorStoreId) -> VectorStoreDeleted +- client.vectorStores.create({ ...params }) -> VectorStore +- client.vectorStores.retrieve(vectorStoreId) -> VectorStore +- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore +- client.vectorStores.list({ ...params }) -> VectorStoresPage +- client.vectorStores.del(vectorStoreId) -> VectorStoreDeleted +- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponsesPage -### Files +## Files Types: -- VectorStoreFile -- VectorStoreFileDeleted +- VectorStoreFile +- VectorStoreFileDeleted +- FileContentResponse Methods: -- client.beta.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile -- client.beta.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile -- client.beta.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted -- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) 
-> Promise<VectorStoreFile> -- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile> +- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile +- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage +- client.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted +- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponsesPage +- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile> -### FileBatches +## FileBatches Types: -- VectorStoreFileBatch +- VectorStoreFileBatch Methods: -- client.beta.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) -> Promise<VectorStoreFileBatch> +- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage +- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) 
-> Promise<VectorStoreFileBatch> -## Chat +# Beta + +## Realtime + +Types: + +- ConversationCreatedEvent +- ConversationItem +- ConversationItemContent +- ConversationItemCreateEvent +- ConversationItemCreatedEvent +- ConversationItemDeleteEvent +- ConversationItemDeletedEvent +- ConversationItemInputAudioTranscriptionCompletedEvent +- ConversationItemInputAudioTranscriptionDeltaEvent +- ConversationItemInputAudioTranscriptionFailedEvent +- ConversationItemRetrieveEvent +- ConversationItemTruncateEvent +- ConversationItemTruncatedEvent +- ConversationItemWithReference +- ErrorEvent +- InputAudioBufferAppendEvent +- InputAudioBufferClearEvent +- InputAudioBufferClearedEvent +- InputAudioBufferCommitEvent +- InputAudioBufferCommittedEvent +- InputAudioBufferSpeechStartedEvent +- InputAudioBufferSpeechStoppedEvent +- RateLimitsUpdatedEvent +- RealtimeClientEvent +- RealtimeResponse +- RealtimeResponseStatus +- RealtimeResponseUsage +- RealtimeServerEvent +- ResponseAudioDeltaEvent +- ResponseAudioDoneEvent +- ResponseAudioTranscriptDeltaEvent +- ResponseAudioTranscriptDoneEvent +- ResponseCancelEvent +- ResponseContentPartAddedEvent +- ResponseContentPartDoneEvent +- ResponseCreateEvent +- ResponseCreatedEvent +- ResponseDoneEvent +- ResponseFunctionCallArgumentsDeltaEvent +- ResponseFunctionCallArgumentsDoneEvent +- ResponseOutputItemAddedEvent +- ResponseOutputItemDoneEvent +- ResponseTextDeltaEvent +- ResponseTextDoneEvent +- SessionCreatedEvent +- SessionUpdateEvent +- SessionUpdatedEvent +- TranscriptionSessionUpdate +- TranscriptionSessionUpdatedEvent + +### Sessions -### Completions +Types: + +- Session +- SessionCreateResponse Methods: -- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.stream(body, options?) 
-> ChatCompletionStream +- client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse + +### TranscriptionSessions + +Types: + +- TranscriptionSession + +Methods: + +- client.beta.realtime.transcriptionSessions.create({ ...params }) -> TranscriptionSession ## Assistants @@ -445,3 +602,157 @@ Types: Methods: - client.uploads.parts.create(uploadId, { ...params }) -> UploadPart + +# Responses + +Types: + +- ComputerTool +- EasyInputMessage +- FileSearchTool +- FunctionTool +- Response +- ResponseAudioDeltaEvent +- ResponseAudioDoneEvent +- ResponseAudioTranscriptDeltaEvent +- ResponseAudioTranscriptDoneEvent +- ResponseCodeInterpreterCallCodeDeltaEvent +- ResponseCodeInterpreterCallCodeDoneEvent +- ResponseCodeInterpreterCallCompletedEvent +- ResponseCodeInterpreterCallInProgressEvent +- ResponseCodeInterpreterCallInterpretingEvent +- ResponseCodeInterpreterToolCall +- ResponseCompletedEvent +- ResponseComputerToolCall +- ResponseComputerToolCallOutputItem +- ResponseComputerToolCallOutputScreenshot +- ResponseContent +- ResponseContentPartAddedEvent +- ResponseContentPartDoneEvent +- ResponseCreatedEvent +- ResponseError +- ResponseErrorEvent +- ResponseFailedEvent +- ResponseFileSearchCallCompletedEvent +- ResponseFileSearchCallInProgressEvent +- ResponseFileSearchCallSearchingEvent +- ResponseFileSearchToolCall +- ResponseFormatTextConfig +- ResponseFormatTextJSONSchemaConfig +- ResponseFunctionCallArgumentsDeltaEvent +- ResponseFunctionCallArgumentsDoneEvent +- ResponseFunctionToolCall +- ResponseFunctionToolCallItem +- ResponseFunctionToolCallOutputItem +- ResponseFunctionWebSearch +- ResponseInProgressEvent +- ResponseIncludable +- ResponseIncompleteEvent +- ResponseInput +- ResponseInputAudio +- ResponseInputContent +- ResponseInputFile +- ResponseInputImage +- ResponseInputItem +- ResponseInputMessageContentList +- ResponseInputMessageItem +- ResponseInputText +- ResponseItem +- ResponseOutputAudio +- ResponseOutputItem +- ResponseOutputItemAddedEvent +- ResponseOutputItemDoneEvent +- ResponseOutputMessage +- ResponseOutputRefusal +- ResponseOutputText +- ResponseReasoningItem +- ResponseReasoningSummaryPartAddedEvent +- ResponseReasoningSummaryPartDoneEvent +- ResponseReasoningSummaryTextDeltaEvent +- ResponseReasoningSummaryTextDoneEvent +- ResponseRefusalDeltaEvent +- ResponseRefusalDoneEvent +- ResponseStatus +- ResponseStreamEvent +- ResponseTextAnnotationDeltaEvent +- ResponseTextConfig +- ResponseTextDeltaEvent +- ResponseTextDoneEvent +- ResponseUsage +- ResponseWebSearchCallCompletedEvent +- ResponseWebSearchCallInProgressEvent +- ResponseWebSearchCallSearchingEvent +- Tool +- ToolChoiceFunction +- ToolChoiceOptions +- ToolChoiceTypes +- WebSearchTool + +Methods: + +- client.responses.create({ ...params }) -> Response +- client.responses.retrieve(responseId, { ...params }) -> Response +- client.responses.del(responseId) -> void + +## InputItems + +Types: + +- ResponseItemList + +Methods: + +- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemsPage + +# Evals + +Types: + +- EvalCustomDataSourceConfig +- EvalStoredCompletionsDataSourceConfig +- EvalCreateResponse +- EvalRetrieveResponse +- EvalUpdateResponse +- EvalListResponse +- EvalDeleteResponse + +Methods: + +- client.evals.create({ ...params }) -> EvalCreateResponse +- client.evals.retrieve(evalId) -> EvalRetrieveResponse +- client.evals.update(evalId, { ...params }) -> EvalUpdateResponse +- client.evals.list({ ...params }) -> EvalListResponsesPage +- client.evals.del(evalId) -> 
EvalDeleteResponse

+## Runs
+
+Types:
+
+- CreateEvalCompletionsRunDataSource
+- CreateEvalJSONLRunDataSource
+- EvalAPIError
+- RunCreateResponse
+- RunRetrieveResponse
+- RunListResponse
+- RunDeleteResponse
+- RunCancelResponse
+
+Methods:
+
+- client.evals.runs.create(evalId, { ...params }) -> RunCreateResponse
+- client.evals.runs.retrieve(evalId, runId) -> RunRetrieveResponse
+- client.evals.runs.list(evalId, { ...params }) -> RunListResponsesPage
+- client.evals.runs.del(evalId, runId) -> RunDeleteResponse
+- client.evals.runs.cancel(evalId, runId) -> RunCancelResponse
+
+### OutputItems
+
+Types:
+
+- OutputItemRetrieveResponse
+- OutputItemListResponse
+
+Methods:
+
+- client.evals.runs.outputItems.retrieve(evalId, runId, outputItemId) -> OutputItemRetrieveResponse
+- client.evals.runs.outputItems.list(evalId, runId, { ...params }) -> OutputItemListResponsesPage
diff --git a/azure.md b/azure.md
new file mode 100644
index 000000000..df06c2985
--- /dev/null
+++ b/azure.md
@@ -0,0 +1,49 @@
+# Microsoft Azure OpenAI
+
+To use this library with [Azure OpenAI](https://learn.microsoft.com/azure/ai-services/openai/overview), use the `AzureOpenAI`
+class instead of the `OpenAI` class.
+
+> [!IMPORTANT]
+> The Azure API shape slightly differs from the core API shape which means that the static types for responses / params
+> won't always be correct.
+
+```ts
+import { AzureOpenAI } from 'openai';
+import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity';
+
+const credential = new DefaultAzureCredential();
+const scope = 'https://cognitiveservices.azure.com/.default';
+const azureADTokenProvider = getBearerTokenProvider(credential, scope);
+
+const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: "" });
+
+const result = await openai.chat.completions.create({
+  model: 'gpt-4o',
+  messages: [{ role: 'user', content: 'Say hello!' }],
+});
+
+console.log(result.choices[0]!.message?.content);
+```
+
+For more information on support for the Azure API, see [azure.md](azure.md).
+
+## Realtime API
+
+This SDK provides real-time streaming capabilities for Azure OpenAI through the `OpenAIRealtimeWS` and `OpenAIRealtimeWebSocket` clients described previously.
+
+To utilize the real-time features, begin by creating a fully configured `AzureOpenAI` client and passing it into either `OpenAIRealtimeWS.azure` or `OpenAIRealtimeWebSocket.azure`. For example:
+
+```ts
+const cred = new DefaultAzureCredential();
+const scope = 'https://cognitiveservices.azure.com/.default';
+const deploymentName = 'gpt-4o-realtime-preview-1001';
+const azureADTokenProvider = getBearerTokenProvider(cred, scope);
+const client = new AzureOpenAI({
+  azureADTokenProvider,
+  apiVersion: '2024-10-01-preview',
+  deployment: deploymentName,
+});
+const rt = await OpenAIRealtimeWS.azure(client);
+```
+
+Once the instance has been created, you can then begin sending requests and receiving streaming responses in real time.
diff --git a/bin/check-release-environment b/bin/check-release-environment
index dbfd546bf..e51564b7d 100644
--- a/bin/check-release-environment
+++ b/bin/check-release-environment
@@ -2,10 +2,6 @@

 errors=()

-if [ -z "${STAINLESS_API_KEY}" ]; then
-  errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.")
-fi
-
 if [ -z "${NPM_TOKEN}" ]; then
   errors+=("The OPENAI_NPM_TOKEN secret has not been set.
Please set it in either this repository's secrets or your organization secrets") fi diff --git a/bin/publish-jsr b/bin/publish-jsr new file mode 100644 index 000000000..1b7365087 --- /dev/null +++ b/bin/publish-jsr @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -eux + +# Build the project +yarn build + +# Navigate to the dist directory +cd dist-deno + +npx jsr publish ${JSR_TOKEN:+"--token=$JSR_TOKEN"} diff --git a/ecosystem-tests/cli.ts b/ecosystem-tests/cli.ts index c03ea668a..77faddec5 100644 --- a/ecosystem-tests/cli.ts +++ b/ecosystem-tests/cli.ts @@ -70,6 +70,7 @@ const projectRunners = { 'cloudflare-worker': async () => { await installPackage(); + await fs.writeFile('.dev.vars', `OPENAI_API_KEY='${process.env['OPENAI_API_KEY']}'`); await run('npm', ['run', 'tsc']); if (state.live) { @@ -97,10 +98,9 @@ const projectRunners = { }, deno: async () => { // we don't need to explicitly install the package here - // because our deno setup relies on `rootDir/deno` to exist + // because our deno setup relies on `rootDir/dist-deno` to exist // which is an artifact produced from our build process - await run('deno', ['task', 'install']); - await run('deno', ['task', 'check']); + await run('deno', ['task', 'install', '--unstable-sloppy-imports']); if (state.live) await run('deno', ['task', 'test']); }, diff --git a/ecosystem-tests/cloudflare-worker/package-lock.json b/ecosystem-tests/cloudflare-worker/package-lock.json index 0673bb27c..99d787f75 100644 --- a/ecosystem-tests/cloudflare-worker/package-lock.json +++ b/ecosystem-tests/cloudflare-worker/package-lock.json @@ -17,7 +17,7 @@ "start-server-and-test": "^2.0.0", "ts-jest": "^29.1.0", "typescript": "5.0.4", - "wrangler": "^3.0.0" + "wrangler": "^3.85.0" } }, "node_modules/@ampproject/remapping": { @@ -662,18 +662,21 @@ "dev": true }, "node_modules/@cloudflare/kv-asset-handler": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.2.0.tgz", - "integrity": "sha512-MVbXLbTcAotOPUj0pAMhVtJ+3/kFkwJqc5qNOleOZTv6QkZZABDMS21dSrSlVswEHwrpWC03e4fWytjqKvuE2A==", + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.3.4.tgz", + "integrity": "sha512-YLPHc8yASwjNkmcDMQMY35yiWjoKAKnhUbPRszBRS0YgH+IXtsMp61j+yTcnCE3oO2DgP0U3iejLC8FTtKDC8Q==", "dev": true, "dependencies": { "mime": "^3.0.0" + }, + "engines": { + "node": ">=16.13" } }, "node_modules/@cloudflare/workerd-darwin-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20231030.0.tgz", - "integrity": "sha512-J4PQ9utPxLya9yHdMMx3AZeC5M/6FxcoYw6jo9jbDDFTy+a4Gslqf4Im9We3aeOEdPXa3tgQHVQOSelJSZLhIw==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20241022.0.tgz", + "integrity": "sha512-1NNYun37myMTgCUiPQEJ0cMal4mKZVTpkD0b2tx9hV70xji+frVJcSK8YVLeUm1P+Rw1d/ct8DMgQuCpsz3Fsw==", "cpu": [ "x64" ], @@ -687,9 +690,9 @@ } }, "node_modules/@cloudflare/workerd-darwin-arm64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20231030.0.tgz", - "integrity": "sha512-WSJJjm11Del4hSneiNB7wTXGtBXI4QMCH9l5qf4iT5PAW8cESGcCmdHtWDWDtGAAGcvmLT04KNvmum92vRKKQQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20241022.0.tgz", + "integrity": 
"sha512-FOO/0P0U82EsTLTdweNVgw+4VOk5nghExLPLSppdOziq6IR5HVgP44Kmq5LdsUeHUhwUmfOh9hzaTpkNzUqKvw==", "cpu": [ "arm64" ], @@ -703,9 +706,9 @@ } }, "node_modules/@cloudflare/workerd-linux-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20231030.0.tgz", - "integrity": "sha512-2HUeRTvoCC17fxE0qdBeR7J9dO8j4A8ZbdcvY8pZxdk+zERU6+N03RTbk/dQMU488PwiDvcC3zZqS4gwLfVT8g==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20241022.0.tgz", + "integrity": "sha512-RsNc19BQJG9yd+ngnjuDeG9ywZG+7t1L4JeglgceyY5ViMNMKVO7Zpbsu69kXslU9h6xyQG+lrmclg3cBpnhYA==", "cpu": [ "x64" ], @@ -719,9 +722,9 @@ } }, "node_modules/@cloudflare/workerd-linux-arm64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20231030.0.tgz", - "integrity": "sha512-4/GK5zHh+9JbUI6Z5xTCM0ZmpKKHk7vu9thmHjUxtz+o8Ne9DoD7DlDvXQWgMF6XGaTubDWyp3ttn+Qv8jDFuQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20241022.0.tgz", + "integrity": "sha512-x5mUXpKxfsosxcFmcq5DaqLs37PejHYVRsNz1cWI59ma7aC4y4Qn6Tf3i0r9MwQTF/MccP4SjVslMU6m4W7IaA==", "cpu": [ "arm64" ], @@ -735,9 +738,9 @@ } }, "node_modules/@cloudflare/workerd-windows-64": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20231030.0.tgz", - "integrity": "sha512-fb/Jgj8Yqy3PO1jLhk7mTrHMkR8jklpbQFud6rL/aMAn5d6MQbaSrYOCjzkKGp0Zng8D2LIzSl+Fc0C9Sggxjg==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20241022.0.tgz", + "integrity": "sha512-eBCClx4szCOgKqOlxxbdNszMqQf3MRG1B9BRIqEM/diDfdR9IrZ8l3FaEm+l9gXgPmS6m1NBn40aWuGBl8UTSw==", "cpu": [ "x64" ], @@ -750,12 +753,47 @@ "node": ">=16" } }, + "node_modules/@cloudflare/workers-shared": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workers-shared/-/workers-shared-0.7.0.tgz", + "integrity": "sha512-LLQRTqx7lKC7o2eCYMpyc5FXV8d0pUX6r3A+agzhqS9aoR5A6zCPefwQGcvbKx83ozX22ATZcemwxQXn12UofQ==", + "dev": true, + "dependencies": { + "mime": "^3.0.0", + "zod": "^3.22.3" + }, + "engines": { + "node": ">=16.7.0" + } + }, "node_modules/@cloudflare/workers-types": { - "version": "4.20230821.0", - "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20230821.0.tgz", - "integrity": "sha512-lVQSyr5E4CEkQw7WIdsrMTj+kHjsm28mJ0B5AhNFByKR+16KTFsU/RW/nGLKHHW2jxT5lvYI+HjNQMzC9QR8Ng==", + "version": "4.20241106.0", + "resolved": "https://registry.npmjs.org/@cloudflare/workers-types/-/workers-types-4.20241106.0.tgz", + "integrity": "sha512-pI4ivacmp+vgNO/siHDsZ6BdITR0LC4Mh/1+yzVLcl9U75pt5DUDCOWOiqIRFXRq6H65DPnJbEPFo3x9UfgofQ==", "dev": true }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": 
"https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, "node_modules/@esbuild-plugins/node-globals-polyfill": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/@esbuild-plugins/node-globals-polyfill/-/node-globals-polyfill-0.2.3.tgz", @@ -1142,6 +1180,15 @@ "node": ">=12" } }, + "node_modules/@fastify/busboy": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.1.tgz", + "integrity": "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==", + "dev": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@hapi/hoek": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", @@ -1655,9 +1702,9 @@ "dev": true }, "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "version": "8.14.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", + "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -1667,10 +1714,13 @@ } }, "node_modules/acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, "engines": { "node": ">=0.4.0" } @@ -1983,18 +2033,6 @@ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", "dev": true }, - "node_modules/busboy": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", - "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", - "dev": true, - "dependencies": { - "streamsearch": "^1.1.0" - }, - "engines": { - "node": ">=10.16.0" - } - }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -2198,9 +2236,9 @@ "dev": true }, "node_modules/cookie": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", "dev": true, "engines": { "node": ">= 0.6" @@ -2249,6 +2287,16 @@ "node": ">= 12" } }, + "node_modules/date-fns": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-4.1.0.tgz", + "integrity": 
"sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==", + "dev": true, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/kossnocorp" + } + }, "node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", @@ -2289,6 +2337,12 @@ "node": ">=0.10.0" } }, + "node_modules/defu": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/defu/-/defu-6.1.4.tgz", + "integrity": "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==", + "dev": true + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -3038,6 +3092,12 @@ "node": ">=8" } }, + "node_modules/itty-time": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/itty-time/-/itty-time-1.0.6.tgz", + "integrity": "sha512-+P8IZaLLBtFv8hCkIjcymZOp4UJ+xW6bSlQsXGqrkmJh7vSiMFSlNne0mCYagEE0N7HDNR5jJBRxwN0oYv61Rw==", + "dev": true + }, "node_modules/jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", @@ -3894,23 +3954,23 @@ } }, "node_modules/miniflare": { - "version": "3.20231030.3", - "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-3.20231030.3.tgz", - "integrity": "sha512-lquHSh0XiO8uoWDujOLHtDS9mkUTJTc5C5amiQ6A++5y0f+DWiMqbDBvvwjlYf4Dvqk6ChFya9dztk7fg2ZVxA==", + "version": "3.20241022.0", + "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-3.20241022.0.tgz", + "integrity": "sha512-x9Fbq1Hmz1f0osIT9Qmj78iX4UpCP2EqlZnA/tzj/3+I49vc3Kq0fNqSSKplcdf6HlCHdL3fOBicmreQF4BUUQ==", "dev": true, "dependencies": { + "@cspotcode/source-map-support": "0.8.1", "acorn": "^8.8.0", "acorn-walk": "^8.2.0", "capnp-ts": "^0.7.0", "exit-hook": "^2.2.1", "glob-to-regexp": "^0.4.1", - "source-map-support": "0.5.21", "stoppable": "^1.1.0", - "undici": "^5.22.1", - "workerd": "1.20231030.0", - "ws": "^8.11.0", + "undici": "^5.28.4", + "workerd": "1.20241022.0", + "ws": "^8.17.1", "youch": "^3.2.2", - "zod": "^3.20.6" + "zod": "^3.22.3" }, "bin": { "miniflare": "bootstrap.js" @@ -3919,16 +3979,6 @@ "node": ">=16.13" } }, - "node_modules/miniflare/node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -4066,6 +4116,12 @@ "node": ">=8" } }, + "node_modules/ohash": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/ohash/-/ohash-1.1.4.tgz", + "integrity": "sha512-FlDryZAahJmEF3VR3w1KogSEdWX3WhA5GPakFx4J81kEAiHyLMpdLLElS8n8dfNadMgAne/MywcvmogzscVt4g==", + "dev": true + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -4193,9 +4249,15 @@ "dev": true }, "node_modules/path-to-regexp": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", - "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==", + "version": "6.3.0", + 
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + "dev": true + }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", "dev": true }, "node_modules/pause-stream": { @@ -4613,15 +4675,6 @@ "duplexer": "~0.1.1" } }, - "node_modules/streamsearch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", - "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", - "dev": true, - "engines": { - "node": ">=10.0.0" - } - }, "node_modules/string-length": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", @@ -4878,18 +4931,37 @@ "node": ">=12.20" } }, + "node_modules/ufo": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.4.tgz", + "integrity": "sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==", + "dev": true + }, "node_modules/undici": { - "version": "5.23.0", - "resolved": "https://registry.npmjs.org/undici/-/undici-5.23.0.tgz", - "integrity": "sha512-1D7w+fvRsqlQ9GscLBwcAJinqcZGHUKjbOmXdlE/v8BvEGXjeWAax+341q44EuTcHXXnfyKNbKRq4Lg7OzhMmg==", + "version": "5.28.4", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.28.4.tgz", + "integrity": "sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==", "dev": true, "dependencies": { - "busboy": "^1.6.0" + "@fastify/busboy": "^2.0.0" }, "engines": { "node": ">=14.0" } }, + "node_modules/unenv": { + "name": "unenv-nightly", + "version": "2.0.0-20241024-111401-d4156ac", + "resolved": "https://registry.npmjs.org/unenv-nightly/-/unenv-nightly-2.0.0-20241024-111401-d4156ac.tgz", + "integrity": "sha512-xJO1hfY+Te+/XnfCYrCbFbRcgu6XEODND1s5wnVbaBCkuQX7JXF7fHEXPrukFE2j8EOH848P8QN19VO47XN8hw==", + "dev": true, + "dependencies": { + "defu": "^6.1.4", + "ohash": "^1.1.4", + "pathe": "^1.1.2", + "ufo": "^1.5.4" + } + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", @@ -4986,9 +5058,9 @@ } }, "node_modules/workerd": { - "version": "1.20231030.0", - "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20231030.0.tgz", - "integrity": "sha512-+FSW+d31f8RrjHanFf/R9A+Z0csf3OtsvzdPmAKuwuZm/5HrBv83cvG9fFeTxl7/nI6irUUXIRF9xcj/NomQzQ==", + "version": "1.20241022.0", + "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20241022.0.tgz", + "integrity": "sha512-jyGXsgO9DRcJyx6Ovv7gUyDPc3UYC2i/E0p9GFUg6GUzpldw4Y93y9kOmdfsOnKZ3+lY53veSiUniiBPE6Q2NQ==", "dev": true, "hasInstallScript": true, "bin": { @@ -4998,32 +5070,37 @@ "node": ">=16" }, "optionalDependencies": { - "@cloudflare/workerd-darwin-64": "1.20231030.0", - "@cloudflare/workerd-darwin-arm64": "1.20231030.0", - "@cloudflare/workerd-linux-64": "1.20231030.0", - "@cloudflare/workerd-linux-arm64": "1.20231030.0", - "@cloudflare/workerd-windows-64": "1.20231030.0" + "@cloudflare/workerd-darwin-64": "1.20241022.0", + "@cloudflare/workerd-darwin-arm64": "1.20241022.0", + 
"@cloudflare/workerd-linux-64": "1.20241022.0", + "@cloudflare/workerd-linux-arm64": "1.20241022.0", + "@cloudflare/workerd-windows-64": "1.20241022.0" } }, "node_modules/wrangler": { - "version": "3.19.0", - "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.19.0.tgz", - "integrity": "sha512-pY7xWqkQn6DJ+1vz9YHz2pCftEmK+JCTj9sqnucp0NZnlUiILDmBWegsjjCLZycgfiA62J213N7NvjLPr2LB8w==", + "version": "3.85.0", + "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-3.85.0.tgz", + "integrity": "sha512-r5YCWUaF4ApLnloNE6jHHgRYdFzYHoajTlC1tns42UzQ2Ls63VAqD3b0cxOqzDUfmlSb3skpmu0B0Ssi3QWPAg==", "dev": true, "dependencies": { - "@cloudflare/kv-asset-handler": "^0.2.0", + "@cloudflare/kv-asset-handler": "0.3.4", + "@cloudflare/workers-shared": "0.7.0", "@esbuild-plugins/node-globals-polyfill": "^0.2.3", "@esbuild-plugins/node-modules-polyfill": "^0.2.2", "blake3-wasm": "^2.1.5", "chokidar": "^3.5.3", + "date-fns": "^4.1.0", "esbuild": "0.17.19", - "miniflare": "3.20231030.3", + "itty-time": "^1.0.6", + "miniflare": "3.20241022.0", "nanoid": "^3.3.3", - "path-to-regexp": "^6.2.0", + "path-to-regexp": "^6.3.0", + "resolve": "^1.22.8", "resolve.exports": "^2.0.2", "selfsigned": "^2.0.1", - "source-map": "0.6.1", - "source-map-support": "0.5.21", + "source-map": "^0.6.1", + "unenv": "npm:unenv-nightly@2.0.0-20241024-111401-d4156ac", + "workerd": "1.20241022.0", "xxhash-wasm": "^1.0.1" }, "bin": { @@ -5035,16 +5112,14 @@ }, "optionalDependencies": { "fsevents": "~2.3.2" - } - }, - "node_modules/wrangler/node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" + }, + "peerDependencies": { + "@cloudflare/workers-types": "^4.20241022.0" + }, + "peerDependenciesMeta": { + "@cloudflare/workers-types": { + "optional": true + } } }, "node_modules/wrap-ansi": { @@ -5084,9 +5159,9 @@ } }, "node_modules/ws": { - "version": "8.13.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", - "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", "dev": true, "engines": { "node": ">=10.0.0" @@ -5165,20 +5240,20 @@ } }, "node_modules/youch": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/youch/-/youch-3.2.3.tgz", - "integrity": "sha512-ZBcWz/uzZaQVdCvfV4uk616Bbpf2ee+F/AvuKDR5EwX/Y4v06xWdtMluqTD7+KlZdM93lLm9gMZYo0sKBS0pgw==", + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/youch/-/youch-3.3.4.tgz", + "integrity": "sha512-UeVBXie8cA35DS6+nBkls68xaBBXCye0CNznrhszZjTbRVnJKQuNsyLKBTTL4ln1o1rh2PKtv35twV7irj5SEg==", "dev": true, "dependencies": { - "cookie": "^0.5.0", + "cookie": "^0.7.1", "mustache": "^4.2.0", "stacktracey": "^2.1.8" } }, "node_modules/zod": { - "version": "3.22.2", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.22.2.tgz", - "integrity": "sha512-wvWkphh5WQsJbVk1tbx1l1Ly4yg+XecD+Mq280uBGt9wa5BKSWf4Mhp6GmrkPixhMxmabYY7RbzlwVP32pbGCg==", + "version": "3.23.8", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz", 
+ "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==", "dev": true, "funding": { "url": "https://github.com/sponsors/colinhacks" diff --git a/ecosystem-tests/cloudflare-worker/package.json b/ecosystem-tests/cloudflare-worker/package.json index 463de4045..3034e97f7 100644 --- a/ecosystem-tests/cloudflare-worker/package.json +++ b/ecosystem-tests/cloudflare-worker/package.json @@ -17,7 +17,7 @@ "start-server-and-test": "^2.0.0", "ts-jest": "^29.1.0", "typescript": "5.0.4", - "wrangler": "^3.0.0" + "wrangler": "^3.85.0" }, "dependencies": { "node-fetch": "^3.3.1" diff --git a/ecosystem-tests/deno/deno.jsonc b/ecosystem-tests/deno/deno.jsonc index 7de05f2ba..46d7ee486 100644 --- a/ecosystem-tests/deno/deno.jsonc +++ b/ecosystem-tests/deno/deno.jsonc @@ -1,11 +1,10 @@ { "tasks": { "install": "deno install --node-modules-dir main_test.ts -f", - "check": "deno lint && deno check main_test.ts", - "test": "deno test --allow-env --allow-net --allow-read --node-modules-dir" + "test": "deno test --allow-env --allow-net --allow-read --node-modules-dir --unstable-sloppy-imports --no-check" }, "imports": { - "openai": "../../deno/mod.ts", - "openai/": "../../deno/" + "openai": "../../dist-deno/index.ts", + "openai/": "../../dist-deno/" } } diff --git a/ecosystem-tests/vercel-edge/package-lock.json b/ecosystem-tests/vercel-edge/package-lock.json index bc820a010..aaca4370c 100644 --- a/ecosystem-tests/vercel-edge/package-lock.json +++ b/ecosystem-tests/vercel-edge/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "dependencies": { "ai": "2.1.34", - "next": "14.1.1", + "next": "^14.2.25", "react": "18.2.0", "react-dom": "18.2.0" }, @@ -1180,14 +1180,14 @@ } }, "node_modules/@next/env": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.1.tgz", - "integrity": "sha512-7CnQyD5G8shHxQIIg3c7/pSeYFeMhsNbpU/bmvH7ZnDql7mNRgg8O2JZrhrc/soFnfBnKP4/xXNiiSIPn2w8gA==" + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==" }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.1.tgz", - "integrity": "sha512-yDjSFKQKTIjyT7cFv+DqQfW5jsD+tVxXTckSe1KIouKk75t1qZmj/mV3wzdmFb0XHVGtyRjDMulfVG8uCKemOQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", "cpu": [ "arm64" ], @@ -1200,9 +1200,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.1.tgz", - "integrity": "sha512-KCQmBL0CmFmN8D64FHIZVD9I4ugQsDBBEJKiblXGgwn7wBCSe8N4Dx47sdzl4JAg39IkSN5NNrr8AniXLMb3aw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", "cpu": [ "x64" ], @@ -1215,9 +1215,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.1.tgz", - "integrity": 
"sha512-YDQfbWyW0JMKhJf/T4eyFr4b3tceTorQ5w2n7I0mNVTFOvu6CGEzfwT3RSAQGTi/FFMTFcuspPec/7dFHuP7Eg==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", "cpu": [ "arm64" ], @@ -1230,9 +1230,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.1.tgz", - "integrity": "sha512-fiuN/OG6sNGRN/bRFxRvV5LyzLB8gaL8cbDH5o3mEiVwfcMzyE5T//ilMmaTrnA8HLMS6hoz4cHOu6Qcp9vxgQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", "cpu": [ "arm64" ], @@ -1245,9 +1245,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.1.tgz", - "integrity": "sha512-rv6AAdEXoezjbdfp3ouMuVqeLjE1Bin0AuE6qxE6V9g3Giz5/R3xpocHoAi7CufRR+lnkuUjRBn05SYJ83oKNQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", "cpu": [ "x64" ], @@ -1260,9 +1260,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.1.tgz", - "integrity": "sha512-YAZLGsaNeChSrpz/G7MxO3TIBLaMN8QWMr3X8bt6rCvKovwU7GqQlDu99WdvF33kI8ZahvcdbFsy4jAFzFX7og==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", "cpu": [ "x64" ], @@ -1275,9 +1275,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.1.tgz", - "integrity": "sha512-1L4mUYPBMvVDMZg1inUYyPvFSduot0g73hgfD9CODgbr4xiTYe0VOMTZzaRqYJYBA9mana0x4eaAaypmWo1r5A==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", "cpu": [ "arm64" ], @@ -1290,9 +1290,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.1.tgz", - "integrity": "sha512-jvIE9tsuj9vpbbXlR5YxrghRfMuG0Qm/nZ/1KDHc+y6FpnZ/apsgh+G6t15vefU0zp3WSpTMIdXRUsNl/7RSuw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", "cpu": [ "ia32" ], @@ -1305,9 +1305,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.1.tgz", - "integrity": 
"sha512-S6K6EHDU5+1KrBDLko7/c1MNy/Ya73pIAmvKeFwsF4RmBFJSO7/7YeD4FnZ4iBdzE69PpQ4sOMU9ORKeNuxe8A==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", "cpu": [ "x64" ], @@ -1418,11 +1418,17 @@ "@sinonjs/commons": "^3.0.0" } }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==" + }, "node_modules/@swc/helpers": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.2.tgz", - "integrity": "sha512-E4KcWTpoLHqwPHLxidpOqQbcrZVgi0rsmmZXUle1jXmJfuIf/UWpczUJ7MZZ5tlxytgJXyp0w4PGkkeLiuIdZw==", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", "dependencies": { + "@swc/counter": "^0.1.3", "tslib": "^2.4.0" } }, @@ -2900,10 +2906,11 @@ } }, "node_modules/detect-libc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", - "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", + "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", "dev": true, + "license": "Apache-2.0", "engines": { "node": ">=8" } @@ -5061,12 +5068,12 @@ "dev": true }, "node_modules/next": { - "version": "14.1.1", - "resolved": "https://registry.npmjs.org/next/-/next-14.1.1.tgz", - "integrity": "sha512-McrGJqlGSHeaz2yTRPkEucxQKe5Zq7uPwyeHNmJaZNY4wx9E9QdxmTp310agFRoMuIYgQrCrT3petg13fSVOww==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", "dependencies": { - "@next/env": "14.1.1", - "@swc/helpers": "0.5.2", + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", "graceful-fs": "^4.2.11", @@ -5080,18 +5087,19 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.1.1", - "@next/swc-darwin-x64": "14.1.1", - "@next/swc-linux-arm64-gnu": "14.1.1", - "@next/swc-linux-arm64-musl": "14.1.1", - "@next/swc-linux-x64-gnu": "14.1.1", - "@next/swc-linux-x64-musl": "14.1.1", - "@next/swc-win32-arm64-msvc": "14.1.1", - "@next/swc-win32-ia32-msvc": "14.1.1", - "@next/swc-win32-x64-msvc": "14.1.1" + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": "14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": "14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", "react": "^18.2.0", "react-dom": "^18.2.0", "sass": "^1.3.0" @@ -5100,6 +5108,9 @@ "@opentelemetry/api": { "optional": true }, + "@playwright/test": { + "optional": true 
+        },
         "sass": {
           "optional": true
         }
@@ -6363,9 +6374,10 @@
       "dev": true
     },
     "node_modules/tslib": {
-      "version": "2.6.2",
-      "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz",
-      "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q=="
+      "version": "2.8.1",
+      "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+      "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
+      "license": "0BSD"
     },
     "node_modules/type-detect": {
       "version": "4.0.8",
diff --git a/ecosystem-tests/vercel-edge/package.json b/ecosystem-tests/vercel-edge/package.json
index 4c75dd4fd..5a8fea816 100644
--- a/ecosystem-tests/vercel-edge/package.json
+++ b/ecosystem-tests/vercel-edge/package.json
@@ -15,7 +15,7 @@
   },
   "dependencies": {
     "ai": "2.1.34",
-    "next": "14.1.1",
+    "next": "^14.2.25",
     "react": "18.2.0",
     "react-dom": "18.2.0"
   },
diff --git a/examples/azure.ts b/examples/azure/chat.ts
similarity index 91%
rename from examples/azure.ts
rename to examples/azure/chat.ts
index 5fe1718fa..46df820f8 100755
--- a/examples/azure.ts
+++ b/examples/azure/chat.ts
@@ -2,6 +2,7 @@
 import { AzureOpenAI } from 'openai';
 import { getBearerTokenProvider, DefaultAzureCredential } from '@azure/identity';
+import 'dotenv/config';
 
 // Corresponds to your Model deployment within your OpenAI resource, e.g. gpt-4-1106-preview
 // Navigate to the Azure OpenAI Studio to deploy a model.
@@ -13,7 +14,7 @@ const azureADTokenProvider = getBearerTokenProvider(credential, scope);
 
 // Make sure to set AZURE_OPENAI_ENDPOINT with the endpoint of your Azure resource.
 // You can find it in the Azure Portal.
-const openai = new AzureOpenAI({ azureADTokenProvider });
+const openai = new AzureOpenAI({ azureADTokenProvider, apiVersion: '2024-10-01-preview' });
 
 async function main() {
   console.log('Non-streaming:');
diff --git a/examples/azure/realtime/websocket.ts b/examples/azure/realtime/websocket.ts
new file mode 100644
index 000000000..91fe3b7b9
--- /dev/null
+++ b/examples/azure/realtime/websocket.ts
@@ -0,0 +1,60 @@
+import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket';
+import { AzureOpenAI } from 'openai';
+import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity';
+import 'dotenv/config';
+
+async function main() {
+  const cred = new DefaultAzureCredential();
+  const scope = 'https://cognitiveservices.azure.com/.default';
+  const deploymentName = 'gpt-4o-realtime-preview-1001';
+  const azureADTokenProvider = getBearerTokenProvider(cred, scope);
+  const client = new AzureOpenAI({
+    azureADTokenProvider,
+    apiVersion: '2024-10-01-preview',
+    deployment: deploymentName,
+  });
+  const rt = await OpenAIRealtimeWebSocket.azure(client);
+
+  // access the underlying `WebSocket` instance
+  rt.socket.addEventListener('open', () => {
+    console.log('Connection opened!');
+    rt.send({
+      type: 'session.update',
+      session: {
+        modalities: ['text'],
+        model: 'gpt-4o-realtime-preview',
+      },
+    });
+
+    rt.send({
+      type: 'conversation.item.create',
+      item: {
+        type: 'message',
+        role: 'user',
+        content: [{ type: 'input_text', text: 'Say a couple paragraphs!'
}],
+      },
+    });
+
+    rt.send({ type: 'response.create' });
+  });
+
+  rt.on('error', (err) => {
+    // in a real-world scenario this should be logged somewhere, as you
+    // likely want to continue processing events regardless of any errors
+    throw err;
+  });
+
+  rt.on('session.created', (event) => {
+    console.log('session created!', event.session);
+    console.log();
+  });
+
+  rt.on('response.text.delta', (event) => process.stdout.write(event.delta));
+  rt.on('response.text.done', () => console.log());
+
+  rt.on('response.done', () => rt.close());
+
+  rt.socket.addEventListener('close', () => console.log('\nConnection closed!'));
+}
+
+main();
diff --git a/examples/azure/realtime/ws.ts b/examples/azure/realtime/ws.ts
new file mode 100644
index 000000000..8b22aeef0
--- /dev/null
+++ b/examples/azure/realtime/ws.ts
@@ -0,0 +1,60 @@
+import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity';
+import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws';
+import { AzureOpenAI } from 'openai';
+import 'dotenv/config';
+
+async function main() {
+  const cred = new DefaultAzureCredential();
+  const scope = 'https://cognitiveservices.azure.com/.default';
+  const deploymentName = 'gpt-4o-realtime-preview-1001';
+  const azureADTokenProvider = getBearerTokenProvider(cred, scope);
+  const client = new AzureOpenAI({
+    azureADTokenProvider,
+    apiVersion: '2024-10-01-preview',
+    deployment: deploymentName,
+  });
+  const rt = await OpenAIRealtimeWS.azure(client);
+
+  // access the underlying `ws.WebSocket` instance
+  rt.socket.on('open', () => {
+    console.log('Connection opened!');
+    rt.send({
+      type: 'session.update',
+      session: {
+        modalities: ['text'],
+        model: 'gpt-4o-realtime-preview',
+      },
+    });
+
+    rt.send({
+      type: 'conversation.item.create',
+      item: {
+        type: 'message',
+        role: 'user',
+        content: [{ type: 'input_text', text: 'Say a couple paragraphs!'
}],
+      },
+    });
+
+    rt.send({ type: 'response.create' });
+  });
+
+  rt.on('error', (err) => {
+    // in a real-world scenario this should be logged somewhere, as you
+    // likely want to continue processing events regardless of any errors
+    throw err;
+  });
+
+  rt.on('session.created', (event) => {
+    console.log('session created!', event.session);
+    console.log();
+  });
+
+  rt.on('response.text.delta', (event) => process.stdout.write(event.delta));
+  rt.on('response.text.done', () => console.log());
+
+  rt.on('response.done', () => rt.close());
+
+  rt.socket.on('close', () => console.log('\nConnection closed!'));
+}
+
+main();
diff --git a/examples/package-lock.json b/examples/package-lock.json
new file mode 100644
index 000000000..6feb8c5f4
--- /dev/null
+++ b/examples/package-lock.json
@@ -0,0 +1,2007 @@
+{
+  "name": "openai-examples",
+  "version": "1.0.0",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "openai-examples",
+      "version": "1.0.0",
+      "license": "MIT",
+      "dependencies": {
+        "@azure/identity": "^4.2.0",
+        "dotenv": "^16.4.7",
+        "express": "^4.18.2",
+        "next": "^14.2.25",
+        "openai": "file:..",
+        "zod-to-json-schema": "^3.21.4"
+      },
+      "devDependencies": {
+        "@types/body-parser": "^1.19.3",
+        "@types/express": "^4.17.19",
+        "@types/web": "^0.0.194"
+      }
+    },
+    "..": {
+      "name": "openai",
+      "version": "4.89.0",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@types/node": "^18.11.18",
+        "@types/node-fetch": "^2.6.4",
+        "abort-controller": "^3.0.0",
+        "agentkeepalive": "^4.2.1",
+        "form-data-encoder": "1.7.2",
+        "formdata-node": "^4.3.2",
+        "node-fetch": "^2.6.7"
+      },
+      "bin": {
+        "openai": "bin/cli"
+      },
+      "devDependencies": {
+        "@swc/core": "^1.3.102",
+        "@swc/jest": "^0.2.29",
+        "@types/jest": "^29.4.0",
+        "@types/ws": "^8.5.13",
+        "@typescript-eslint/eslint-plugin": "^6.7.0",
+        "@typescript-eslint/parser": "^6.7.0",
+        "eslint": "^8.49.0",
+        "eslint-plugin-prettier": "^5.0.1",
+        "eslint-plugin-unused-imports": "^3.0.0",
+        "fast-check": "^3.22.0",
+        "iconv-lite": "^0.6.3",
+        "jest": "^29.4.0",
+        "prettier": "^3.0.0",
+        "prettier-2": "npm:prettier@^2",
+        "ts-jest": "^29.1.0",
+        "ts-node": "^10.5.0",
+        "tsc-multi": "^1.1.0",
+        "tsconfig-paths": "^4.0.0",
+        "typescript": "^4.8.2",
+        "ws": "^8.18.0",
+        "zod": "^3.23.8"
+      },
+      "peerDependencies": {
+        "ws": "^8.18.0",
+        "zod": "^3.23.8"
+      },
+      "peerDependenciesMeta": {
+        "ws": {
+          "optional": true
+        },
+        "zod": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/@azure/abort-controller": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.2.tgz",
+      "integrity": "sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==",
+      "license": "MIT",
+      "dependencies": {
+        "tslib": "^2.6.2"
+      },
+      "engines": {
+        "node": ">=18.0.0"
+      }
+    },
+    "node_modules/@azure/core-auth": {
+      "version": "1.9.0",
+      "resolved": "https://registry.npmjs.org/@azure/core-auth/-/core-auth-1.9.0.tgz",
+      "integrity": "sha512-FPwHpZywuyasDSLMqJ6fhbOK3TqUdviZNF8OqRGA4W5Ewib2lEEZ+pBsYcBa88B2NGO/SEnYPGhyBqNlE8ilSw==",
+      "license": "MIT",
+      "dependencies": {
+        "@azure/abort-controller": "^2.0.0",
+        "@azure/core-util": "^1.11.0",
+        "tslib": "^2.6.2"
+      },
+      "engines": {
+        "node": ">=18.0.0"
+      }
+    },
+    "node_modules/@azure/core-client": {
+      "version": "1.9.3",
+      "resolved": "https://registry.npmjs.org/@azure/core-client/-/core-client-1.9.3.tgz",
+      "integrity":
"sha512-/wGw8fJ4mdpJ1Cum7s1S+VQyXt1ihwKLzfabS1O/RDADnmzVc01dHn44qD0BvGH6KlZNzOMW95tEpKqhkCChPA==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.4.0", + "@azure/core-rest-pipeline": "^1.9.1", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.6.1", + "@azure/logger": "^1.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-rest-pipeline": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/@azure/core-rest-pipeline/-/core-rest-pipeline-1.19.1.tgz", + "integrity": "sha512-zHeoI3NCs53lLBbWNzQycjnYKsA1CVKlnzSNuSFcUDwBp8HHVObePxrM7HaX+Ha5Ks639H7chNC9HOaIhNS03w==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.8.0", + "@azure/core-tracing": "^1.0.1", + "@azure/core-util": "^1.11.0", + "@azure/logger": "^1.0.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-tracing": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@azure/core-tracing/-/core-tracing-1.2.0.tgz", + "integrity": "sha512-UKTiEJPkWcESPYJz3X5uKRYyOcJD+4nYph+KpfdPRnQJVrZfk0KJgdnaAWKfhsBBtAf/D58Az4AvCJEmWgIBAg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-util": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/@azure/core-util/-/core-util-1.11.0.tgz", + "integrity": "sha512-DxOSLua+NdpWoSqULhjDyAZTXFdP/LKkqtYuxxz1SCN289zk3OG8UOpnCQAz/tygyACBtWp/BoO72ptK7msY8g==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/identity": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@azure/identity/-/identity-4.8.0.tgz", + "integrity": "sha512-l9ALUGHtFB/JfsqmA+9iYAp2a+cCwdNO/cyIr2y7nJLJsz1aae6qVP8XxT7Kbudg0IQRSIMXj0+iivFdbD1xPA==", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.9.0", + "@azure/core-client": "^1.9.2", + "@azure/core-rest-pipeline": "^1.17.0", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.11.0", + "@azure/logger": "^1.0.0", + "@azure/msal-browser": "^4.2.0", + "@azure/msal-node": "^3.2.3", + "events": "^3.0.0", + "jws": "^4.0.0", + "open": "^10.1.0", + "stoppable": "^1.1.0", + "tslib": "^2.2.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/logger": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@azure/logger/-/logger-1.1.4.tgz", + "integrity": "sha512-4IXXzcCdLdlXuCG+8UKEwLA1T1NHqUfanhXYHiQTn+6sfWCZXduqbtXDGceg3Ce5QxTGo7EqmbV6Bi+aqKuClQ==", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/msal-browser": { + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@azure/msal-browser/-/msal-browser-4.8.0.tgz", + "integrity": "sha512-z7kJlMW3IAETyq82LDKJqr++IeOvU728q9lkuTFjEIPUWxnB1OlmuPCF32fYurxOnOnJeFEZxjbEzq8xyP0aag==", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "15.3.0" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-common": { + "version": "15.3.0", + "resolved": "https://registry.npmjs.org/@azure/msal-common/-/msal-common-15.3.0.tgz", + 
"integrity": "sha512-lh+eZfibGwtQxFnx+mj6cYWn0pwA8tDnn8CBs9P21nC7Uw5YWRwfXaXdVQSMENZ5ojRqR+NzRaucEo4qUvs3pA==", + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-node": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@azure/msal-node/-/msal-node-3.4.0.tgz", + "integrity": "sha512-b4wBaPV68i+g61wFOfl5zh1lQ9UylgCQpI2638pJHV0SINneO78hOFdnX8WCoGw5OOc4Eewth9pYOg7gaiyUYw==", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "15.3.0", + "jsonwebtoken": "^9.0.0", + "uuid": "^8.3.0" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/@next/env": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "license": "MIT" + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "14.2.25", + "resolved": 
"https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", + "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", + "license": "Apache-2.0" + }, + "node_modules/@swc/helpers": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.5.tgz", + "integrity": "sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==", + "license": "Apache-2.0", + "dependencies": { + "@swc/counter": "^0.1.3", + "tslib": "^2.4.0" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.5", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", + "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", + "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.19.6", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.6.tgz", + "integrity": "sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/http-errors": { + "version": 
"2.0.4", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", + "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.13.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.13.tgz", + "integrity": "sha512-ClsL5nMwKaBRwPcCvH8E7+nU4GxHVx1axNvMZTFHMEfNI7oahimt26P5zjVCRrjiIWj6YFXfE1v3dEp94wLcGQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.20.0" + } + }, + "node_modules/@types/qs": { + "version": "6.9.18", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.18.tgz", + "integrity": "sha512-kK7dgTYDyGqS+e2Q4aK9X3D7q234CIZ1Bv0q/7Z5IwRDoADNU81xXJK/YVyLbLTZCoIwUoDoffFeF+p/eIklAA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/send": { + "version": "0.17.4", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", + "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.7", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", + "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "*" + } + }, + "node_modules/@types/web": { + "version": "0.0.194", + "resolved": "https://registry.npmjs.org/@types/web/-/web-0.0.194.tgz", + "integrity": "sha512-VKseTFF3Y8SNbpZqdVFNWQ677ujwNyrI9LcySEUwZX5iebbcdE235Lq/vqrfCzj1oFsXyVUUBqq4x8enXSakMA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + 
"node_modules/body-parser": { + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.13.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", + "license": "BSD-3-Clause" + }, + "node_modules/bundle-name": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz", + "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", + "license": "MIT", + "dependencies": { + "run-applescript": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/busboy": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", + "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001707", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001707.tgz", + "integrity": "sha512-3qtRjw/HQSMlDWf+X79N206fepf4SOOU6SQLMaq/0KkZLmSjPxAkBOQQ+FxbHKfHmYLZFfdWsO3KA90ceHPSnw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": 
"https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", + "license": "MIT" + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/default-browser": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.2.1.tgz", + "integrity": "sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==", + "license": "MIT", + "dependencies": { + "bundle-name": "^4.1.0", + "default-browser-id": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser-id": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.0.tgz", + "integrity": "sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-lazy-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", + "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + 
"license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/dotenv": { + "version": "16.4.7", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com/" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": 
"https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/express": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.3", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.7.1", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.3.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.12", + "proxy-addr": "~2.0.7", + "qs": "6.13.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.19.0", + "serve-static": "1.16.2", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/finalhandler": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + 
"dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/http-proxy-agent/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + 
"node_modules/http-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/https-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz", + "integrity": 
"sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", + "license": "MIT", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT", + "peer": true + }, + "node_modules/jsonwebtoken": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/jsonwebtoken/-/jsonwebtoken-9.0.2.tgz", + "integrity": "sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==", + "license": "MIT", + "dependencies": { + "jws": "^3.2.2", + "lodash.includes": "^4.3.0", + "lodash.isboolean": "^3.0.3", + "lodash.isinteger": "^4.0.4", + "lodash.isnumber": "^3.0.3", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.once": "^4.0.0", + "ms": "^2.1.1", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/jsonwebtoken/node_modules/jwa": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-1.4.1.tgz", + "integrity": "sha512-qiLX/xhEEFKUAJ6FiBMbes3w9ATzyk5W7Hvzpa/SLYdxNtng+gcurvrI7TbACjIXlsJyr05/S1oUhZrc63evQA==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jsonwebtoken/node_modules/jws": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/jws/-/jws-3.2.2.tgz", + "integrity": "sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==", + "license": "MIT", + "dependencies": { + "jwa": "^1.4.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jsonwebtoken/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/jwa": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", + "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", + "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", + "license": "MIT", + "dependencies": { + "jwa": "^2.0.0", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/lodash.includes": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.includes/-/lodash.includes-4.3.0.tgz", + "integrity": "sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==", + "license": "MIT" + }, + "node_modules/lodash.isboolean": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", + "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==", + 
"license": "MIT" + }, + "node_modules/lodash.isinteger": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/lodash.isinteger/-/lodash.isinteger-4.0.4.tgz", + "integrity": "sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==", + "license": "MIT" + }, + "node_modules/lodash.isnumber": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/lodash.isnumber/-/lodash.isnumber-3.0.3.tgz", + "integrity": "sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==", + "license": "MIT" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==", + "license": "MIT" + }, + "node_modules/lodash.isstring": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz", + "integrity": "sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==", + "license": "MIT" + }, + "node_modules/lodash.once": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", + "integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==", + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "peer": true, + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": 
">=4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/next": { + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "license": "MIT", + "dependencies": { + "@next/env": "14.2.25", + "@swc/helpers": "0.5.5", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001579", + "graceful-fs": "^4.2.11", + "postcss": "8.4.31", + "styled-jsx": "5.1.1" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": ">=18.17.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": "14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + "@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": "14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "@playwright/test": "^1.41.2", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "@playwright/test": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": 
"https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/open": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/open/-/open-10.1.0.tgz", + "integrity": "sha512-mnkeQ1qP5Ue2wd+aivTD3NHd/lZ96Lu0jgf0pwktLPtx6cTZiH7tyeGRRHs0zX0rbrahXPnXlUnbeXyaBBuIaw==", + "license": "MIT", + "dependencies": { + "default-browser": "^5.2.1", + "define-lazy-prop": "^3.0.0", + "is-inside-container": "^1.0.0", + "is-wsl": "^3.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/openai": { + "resolved": "..", + "link": true + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": 
"sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/run-applescript": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.0.0.tgz", + "integrity": "sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", 
+ "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/serve-static": { + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.19.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": 
"sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/stoppable": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stoppable/-/stoppable-1.1.0.tgz", + "integrity": "sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw==", + "license": "MIT", + "engines": { + "node": ">=4", + "npm": ">=6" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", + "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz", + "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/undici-types": { + "version": "6.20.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz", + "integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==", + "dev": true, + "license": "MIT" + }, + "node_modules/unpipe": 
{ + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/zod": { + "version": "3.24.2", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz", + "integrity": "sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-to-json-schema": { + "version": "3.24.5", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.5.tgz", + "integrity": "sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==", + "license": "ISC", + "peerDependencies": { + "zod": "^3.24.1" + } + } + } +} diff --git a/examples/package.json b/examples/package.json index c8a5f7087..db01a2c10 100644 --- a/examples/package.json +++ b/examples/package.json @@ -6,14 +6,16 @@ "license": "MIT", "private": true, "dependencies": { + "@azure/identity": "^4.2.0", + "dotenv": "^16.4.7", "express": "^4.18.2", - "next": "^14.1.1", + "next": "^14.2.25", "openai": "file:..", - "zod-to-json-schema": "^3.21.4", - "@azure/identity": "^4.2.0" + "zod-to-json-schema": "^3.21.4" }, "devDependencies": { "@types/body-parser": "^1.19.3", - "@types/express": "^4.17.19" + "@types/express": "^4.17.19", + "@types/web": "^0.0.194" } } diff --git a/examples/realtime/websocket.ts b/examples/realtime/websocket.ts new file mode 100644 index 000000000..6fb4740af --- /dev/null +++ b/examples/realtime/websocket.ts @@ -0,0 +1,48 @@ +import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket'; + +async function main() { + const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + + // access the underlying `ws.WebSocket` instance + rt.socket.addEventListener('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue processing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.addEventListener('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/examples/realtime/ws.ts b/examples/realtime/ws.ts new file mode 100644 index 000000000..6cc950b76 --- /dev/null +++ b/examples/realtime/ws.ts @@ -0,0 +1,48 @@ +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; + +async function main() { + const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + + // access the underlying `ws.WebSocket` instance + rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' }], + }, + }); + + rt.send({ type: 'response.create' }); + }); + + rt.on('error', (err) => { + // in a real world scenario this should be logged somewhere as you + // likely want to continue processing events regardless of any errors + throw err; + }); + + rt.on('session.created', (event) => { + console.log('session created!', event.session); + console.log(); + }); + + rt.on('response.text.delta', (event) => process.stdout.write(event.delta)); + rt.on('response.text.done', () => console.log()); + + rt.on('response.done', () => rt.close()); + + rt.socket.on('close', () => console.log('\nConnection closed!')); +} + +main(); diff --git a/examples/responses/stream.ts b/examples/responses/stream.ts new file mode 100755 index 000000000..ea3d0849e --- /dev/null +++ b/examples/responses/stream.ts @@ -0,0 +1,24 @@ +#!/usr/bin/env -S npm run tsn -T + +import OpenAI from 'openai'; + +const openai = new OpenAI(); + +async function main() { + const runner = openai.responses + .stream({ + model: 'gpt-4o-2024-08-06', + input: 'solve 8x + 31 = 2', + }) + .on('event', (event) => console.log(event)) + .on('response.output_text.delta', (diff) => process.stdout.write(diff.delta)); + + for await (const event of runner) { + console.log('event', event); + } + + const result = await runner.finalResponse(); + console.log(result); +} + +main(); diff --git a/examples/responses/streaming-tools.ts b/examples/responses/streaming-tools.ts new file mode 100755 index 000000000..87a48d0c3 --- /dev/null +++ b/examples/responses/streaming-tools.ts @@ -0,0 +1,52 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodResponsesFunction } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 'customers', 'products']); +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); +const OrderBy = z.enum(['asc', 'desc']); +const DynamicValue = z.object({ + column_name: Column, +}); + +const Condition = z.object({ + column: Column, + operator: Operator, + 
value: z.union([z.string(), z.number(), DynamicValue]),
+});
+
+const Query = z.object({
+  table_name: Table,
+  columns: z.array(Column),
+  conditions: z.array(Condition),
+  order_by: OrderBy,
+});
+
+async function main() {
+  const client = new OpenAI();
+
+  const tool = zodResponsesFunction({ name: 'query', parameters: Query });
+
+  const stream = client.responses.stream({
+    model: 'gpt-4o-2024-08-06',
+    input: 'look up all my orders in november of last year that were fulfilled but not delivered on time',
+    tools: [tool],
+  });
+
+  for await (const event of stream) {
+    console.dir(event, { depth: 10 });
+  }
+}
+
+main();
diff --git a/examples/responses/structured-outputs-tools.ts b/examples/responses/structured-outputs-tools.ts
new file mode 100755
index 000000000..29eaabf93
--- /dev/null
+++ b/examples/responses/structured-outputs-tools.ts
@@ -0,0 +1,60 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import { OpenAI } from 'openai';
+import { zodResponsesFunction } from 'openai/helpers/zod';
+import { z } from 'zod';
+
+const Table = z.enum(['orders', 'customers', 'products']);
+const Column = z.enum([
+  'id',
+  'status',
+  'expected_delivery_date',
+  'delivered_at',
+  'shipped_at',
+  'ordered_at',
+  'canceled_at',
+]);
+const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']);
+const OrderBy = z.enum(['asc', 'desc']);
+const DynamicValue = z.object({
+  column_name: Column,
+});
+
+const Condition = z.object({
+  column: Column,
+  operator: Operator,
+  value: z.union([z.string(), z.number(), DynamicValue]),
+});
+
+const Query = z.object({
+  table_name: Table,
+  columns: z.array(Column),
+  conditions: z.array(Condition),
+  order_by: OrderBy,
+});
+
+async function main() {
+  const client = new OpenAI();
+
+  const tool = zodResponsesFunction({ name: 'query', parameters: Query });
+
+  const rsp = await client.responses.parse({
+    model: 'gpt-4o-2024-08-06',
+    input: 'look up all my orders in november of last year that were fulfilled but not delivered on time',
+    tools: [tool],
+  });
+
+  console.log(rsp);
+
+  const functionCall = rsp.output[0]!;
+
+  if (functionCall.type !== 'function_call') {
+    throw new Error('Expected function call');
+  }
+
+  const query = functionCall.parsed_arguments;
+
+  console.log(query);
+}
+
+main();
diff --git a/examples/responses/structured-outputs.ts b/examples/responses/structured-outputs.ts
new file mode 100755
index 000000000..07ff93a60
--- /dev/null
+++ b/examples/responses/structured-outputs.ts
@@ -0,0 +1,32 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import { OpenAI } from 'openai';
+import { zodTextFormat } from 'openai/helpers/zod';
+import { z } from 'zod';
+
+const Step = z.object({
+  explanation: z.string(),
+  output: z.string(),
+});
+
+const MathResponse = z.object({
+  steps: z.array(Step),
+  final_answer: z.string(),
+});
+
+const client = new OpenAI();
+
+async function main() {
+  const rsp = await client.responses.parse({
+    input: 'solve 8x + 31 = 2',
+    model: 'gpt-4o-2024-08-06',
+    text: {
+      format: zodTextFormat(MathResponse, 'math_response'),
+    },
+  });
+
+  console.log(rsp.output_parsed);
+  console.log('answer: ', rsp.output_parsed?.final_answer);
+}
+
+main().catch(console.error);
diff --git a/examples/speech-to-text.ts b/examples/speech-to-text.ts
new file mode 100644
index 000000000..f2eb60b4d
--- /dev/null
+++ b/examples/speech-to-text.ts
@@ -0,0 +1,19 @@
+import OpenAI from 'openai';
+import { recordAudio } from 'openai/helpers/audio';
+
+const openai = new OpenAI();
+
+async function main(): Promise<void> {
+  console.log('Recording for 5 seconds...');
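+  // (Assumption: `device` selects the audio input device by index, so the `4`
+  // below is an arbitrary example value for one machine, not a universal default.)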
const response = await recordAudio({ timeout: 5000, device: 4 });
+
+  console.log('Transcribing...');
+  const transcription = await openai.audio.transcriptions.create({
+    file: response,
+    model: 'whisper-1',
+  });
+
+  console.log(transcription.text);
+}
+
+main().catch(console.error);
diff --git a/examples/text-to-speech.ts b/examples/text-to-speech.ts
new file mode 100644
index 000000000..5a87adf91
--- /dev/null
+++ b/examples/text-to-speech.ts
@@ -0,0 +1,23 @@
+import OpenAI from 'openai';
+import { playAudio } from 'openai/helpers/audio';
+
+const openai = new OpenAI();
+
+const exampleText = `
+I see skies of blue and clouds of white
+The bright blessed days, the dark sacred nights
+And I think to myself
+What a wonderful world
+`.trim();
+
+async function main(): Promise<void> {
+  const response = await openai.audio.speech.create({
+    model: 'tts-1',
+    voice: 'nova',
+    input: exampleText,
+  });
+
+  await playAudio(response);
+}
+
+main().catch(console.error);
diff --git a/examples/tsconfig.json b/examples/tsconfig.json
index 6c3477462..3c43903cf 100644
--- a/examples/tsconfig.json
+++ b/examples/tsconfig.json
@@ -1,3 +1,3 @@
 {
-  "extends": "../tsconfig.json"
+  "extends": "../tsconfig.json"
 }
diff --git a/helpers.md b/helpers.md
index abf980c82..41b352e5e 100644
--- a/helpers.md
+++ b/helpers.md
@@ -49,7 +49,7 @@ if (message?.parsed) {
 
 The `.parse()` method will also automatically parse `function` tool calls if:
 
-- You use the `zodFunctionTool()` helper method
+- You use the `zodFunction()` helper method
 - You mark your tool schema with `"strict": True`
 
 For example:
@@ -142,9 +142,7 @@ More information can be found in the documentation: [Assistant Streaming](https:
 
 ```ts
 const run = openai.beta.threads.runs
-  .stream(thread.id, {
-    assistant_id: assistant.id,
-  })
+  .stream(thread.id, { assistant_id: assistant.id })
   .on('textCreated', (text) => process.stdout.write('\nassistant > '))
   .on('textDelta', (textDelta, snapshot) => process.stdout.write(textDelta.value))
   .on('toolCallCreated', (toolCall) => process.stdout.write(`\nassistant > ${toolCall.type}\n\n`))
@@ -226,7 +224,7 @@ on in the documentation page [Message](https://platform.openai.com/docs/api-refe
 
 ```ts
 .on('textCreated', (content: Text) => ...)
-.on('textDelta', (delta: RunStepDelta, snapshot: Text) => ...)
+.on('textDelta', (delta: TextDelta, snapshot: Text) => ...)
 .on('textDone', (content: Text, snapshot: Message) => ...)
 ```
 
@@ -304,47 +302,87 @@ If you need to cancel a stream, you can `break` from a `for await` loop or call
 
 See an example of streaming helpers in action in [`examples/stream.ts`](examples/stream.ts).
 
-### Automated Function Calls
+### Automated function calls
 
-```ts
-openai.chat.completions.runTools({ stream: false, … }, options?): ChatCompletionRunner
-openai.chat.completions.runTools({ stream: true, … }, options?): ChatCompletionStreamingRunner
-```
+We provide the `openai.beta.chat.completions.runTools({…})`
+convenience helper for using function tool calls with the `/chat/completions` endpoint
+which automatically calls the JavaScript functions you provide
+and sends their results back to the `/chat/completions` endpoint,
+looping as long as the model requests tool calls.
 
-`openai.chat.completions.runTools()` returns a Runner
-for automating function calls with chat completions.
-The runner automatically calls the JavaScript functions you provide and sends their results back
-to the API, looping as long as the model requests function calls.
+If you pass a `parse` function, it will automatically parse the `arguments` for you
+and return any parsing errors to the model to attempt auto-recovery.
+Otherwise, the args will be passed to the function you provide as a string.
 
-If you pass a `parse` function, it will automatically parse the `arguments` for you and returns any parsing
-errors to the model to attempt auto-recovery. Otherwise, the args will be passed to the function you provide
-as a string.
+If you pass `tool_choice: {function: {name: …}}` instead of `auto`,
+it returns immediately after calling that function (and only loops to auto-recover parsing errors).
 
 ```ts
-client.chat.completions.runTools({
-  model: 'gpt-3.5-turbo',
-  messages: [{ role: 'user', content: 'How is the weather this week?' }],
-  tools: [
-    {
-      type: 'function',
-      function: {
-        function: getWeather as (args: { location: string; time: Date }) => any,
-        parse: parseFunction as (args: strings) => { location: string; time: Date },
-        parameters: {
-          type: 'object',
-          properties: {
-            location: { type: 'string' },
-            time: { type: 'string', format: 'date-time' },
+import OpenAI from 'openai';
+
+const client = new OpenAI();
+
+async function main() {
+  const runner = client.beta.chat.completions
+    .runTools({
+      model: 'gpt-4o',
+      messages: [{ role: 'user', content: 'How is the weather this week?' }],
+      tools: [
+        {
+          type: 'function',
+          function: {
+            function: getCurrentLocation,
+            parameters: { type: 'object', properties: {} },
          },
        },
-      },
-    },
-  ],
-});
+        {
+          type: 'function',
+          function: {
+            function: getWeather,
+            parse: JSON.parse, // or use a validation library like zod for typesafe parsing.
+            parameters: {
+              type: 'object',
+              properties: {
+                location: { type: 'string' },
+              },
+            },
+          },
+        },
+      ],
+    })
+    .on('message', (message) => console.log(message));
+
+  const finalContent = await runner.finalContent();
+  console.log();
+  console.log('Final content:', finalContent);
+}
+
+async function getCurrentLocation() {
+  return 'Boston'; // Simulate lookup
+}
+
+async function getWeather(args: { location: string }) {
+  const { location } = args;
+  // … do lookup …
+  return { temperature, precipitation };
+}
+
+main();
+
+// {role: "user", content: "How is the weather this week?"}
+// {role: "assistant", tool_calls: [{type: "function", function: {name: "getCurrentLocation", arguments: "{}"}, id: "123"}]}
+// {role: "tool", name: "getCurrentLocation", content: "Boston", tool_call_id: "123"}
+// {role: "assistant", tool_calls: [{type: "function", function: {name: "getWeather", arguments: '{"location": "Boston"}'}, id: "1234"}]}
+// {role: "tool", name: "getWeather", content: '{"temperature": "50degF", "precipitation": "high"}', tool_call_id: "1234"}
+// {role: "assistant", content: "It's looking cold and rainy - you might want to wear a jacket!"}
+//
+// Final content: "It's looking cold and rainy - you might want to wear a jacket!"
 ```
 
-If you pass `function_call: {name: …}` instead of `auto`, it returns immediately after calling that
-function (and only loops to auto-recover parsing errors).
+Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#chat-events).
+
+Read more about various examples such as integrating with [zod](#integrate-with-zod),
+[next.js](#integrate-with-nextjs), and [proxying a stream to the browser](#proxy-streaming-to-a-browser).
 
 By default, we run the loop up to 10 chat completions from the API. You can change this behavior by
 adjusting `maxChatCompletions` in the request options object.
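To make the `tool_choice` behavior above concrete, here is a rough sketch (reusing the `client` and `getWeather` function from the example above, inside an async context; the `tool_choice` object uses the standard Chat Completions shape):

```ts
// Sketch: force the model to call one specific tool. Because a concrete tool
// is named in `tool_choice`, the runner returns right after `getWeather`
// resolves instead of looping (it still loops to auto-recover parsing errors).
const forced = client.beta.chat.completions.runTools({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'How is the weather in Boston?' }],
  tool_choice: { type: 'function', function: { name: 'getWeather' } },
  tools: [
    {
      type: 'function',
      function: {
        name: 'getWeather', // must match the name referenced in `tool_choice`
        function: getWeather,
        parse: JSON.parse,
        parameters: {
          type: 'object',
          properties: {
            location: { type: 'string' },
          },
        },
      },
    },
  ],
});

console.log(await forced.finalContent());
```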
Note that `max_tokens` is the limit per @@ -662,3 +700,17 @@ client.beta.vectorStores.files.createAndPoll((...) client.beta.vectorStores.fileBatches.createAndPoll((...) client.beta.vectorStores.fileBatches.uploadAndPoll((...) ``` + +# Bulk Upload Helpers + +When creating and interacting with vector stores, you can use the polling helpers to monitor the status of operations. +For convenience, we also provide a bulk upload helper to allow you to simultaneously upload several files at once. + +```ts +const fileList = [ + createReadStream('/home/data/example.pdf'), + ... +]; + +const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList}); +``` diff --git a/jsr.json b/jsr.json new file mode 100644 index 000000000..3c2d41b0f --- /dev/null +++ b/jsr.json @@ -0,0 +1,17 @@ +{ + "name": "@openai/openai", + "version": "4.100.0", + "exports": { + ".": "./index.ts", + "./helpers/zod": "./helpers/zod.ts", + "./beta/realtime/websocket": "./beta/realtime/websocket.ts" + }, + "imports": { + "zod": "npm:zod@3" + }, + "publish": { + "exclude": [ + "!." + ] + } +} diff --git a/jsr.json.orig b/jsr.json.orig new file mode 100644 index 000000000..c7b99a6f6 --- /dev/null +++ b/jsr.json.orig @@ -0,0 +1,25 @@ +{ + "name": "@openai/openai", +<<<<<<< HEAD + "version": "4.87.4", + "exports": { + ".": "./index.ts", + "./helpers/zod": "./helpers/zod.ts", + "./beta/realtime/websocket": "./beta/realtime/websocket.ts" + }, + "imports": { + "zod": "npm:zod@3" + }, +||||||| parent of 0603bcac (chore(internal): version bump (#1393)) + "version": "4.87.3", + "exports": "./index.ts", +======= + "version": "4.87.4", + "exports": "./index.ts", +>>>>>>> 0603bcac (chore(internal): version bump (#1393)) + "publish": { + "exclude": [ + "!." + ] + } +} diff --git a/package.json b/package.json index e20c1b9c1..23205e569 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openai", - "version": "4.67.3", + "version": "4.100.0", "description": "The official TypeScript library for the OpenAI API", "author": "OpenAI ", "types": "dist/index.d.ts", @@ -10,7 +10,7 @@ "license": "Apache-2.0", "packageManager": "yarn@1.22.22", "files": [ - "*" + "**/*" ], "private": false, "scripts": { @@ -18,7 +18,7 @@ "build": "./scripts/build", "prepublishOnly": "echo 'to publish, run yarn build && (cd dist; yarn publish)' && exit 1", "format": "prettier --write --cache --cache-strategy metadata . 
!dist", - "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build; fi", + "prepare": "if ./scripts/utils/check-is-in-git-install.sh; then ./scripts/build && ./scripts/utils/git-swap.sh; fi", "tsn": "ts-node -r tsconfig-paths/register", "lint": "./scripts/lint", "fix": "./scripts/format" @@ -36,22 +36,23 @@ "@swc/core": "^1.3.102", "@swc/jest": "^0.2.29", "@types/jest": "^29.4.0", + "@types/ws": "^8.5.13", "@typescript-eslint/eslint-plugin": "^6.7.0", "@typescript-eslint/parser": "^6.7.0", "eslint": "^8.49.0", "eslint-plugin-prettier": "^5.0.1", "eslint-plugin-unused-imports": "^3.0.0", - "iconv-lite": "^0.6.3", "fast-check": "^3.22.0", + "iconv-lite": "^0.6.3", "jest": "^29.4.0", "prettier": "^3.0.0", "prettier-2": "npm:prettier@^2", "ts-jest": "^29.1.0", - "ts-morph": "^19.0.0", "ts-node": "^10.5.0", "tsc-multi": "^1.1.0", "tsconfig-paths": "^4.0.0", "typescript": "^4.8.2", + "ws": "^8.18.0", "zod": "^3.23.8" }, "sideEffects": [ @@ -126,9 +127,13 @@ }, "bin": "./bin/cli", "peerDependencies": { + "ws": "^8.18.0", "zod": "^3.23.8" }, "peerDependenciesMeta": { + "ws": { + "optional": true + }, "zod": { "optional": true } diff --git a/realtime.md b/realtime.md new file mode 100644 index 000000000..7e8d84a3c --- /dev/null +++ b/realtime.md @@ -0,0 +1,86 @@ +## Realtime API beta + +The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as [function calling](https://platform.openai.com/docs/guides/function-calling) through a `WebSocket` connection. + +The Realtime API works through a combination of client-sent events and server-sent events. Clients can send events to do things like update session configuration or send text and audio inputs. Server events confirm when audio responses have completed, or when a text response from the model has been received. A full event reference can be found [here](https://platform.openai.com/docs/api-reference/realtime-client-events) and a guide can be found [here](https://platform.openai.com/docs/guides/realtime). + +This SDK supports accessing the Realtime API through the [WebSocket API](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket) or with [ws](https://github.com/websockets/ws). + +Basic text based example with `ws`: + +```ts +// requires `yarn add ws @types/ws` +import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws'; + +const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' }); + +// access the underlying `ws.WebSocket` instance +rt.socket.on('open', () => { + console.log('Connection opened!'); + rt.send({ + type: 'session.update', + session: { + modalities: ['text'], + model: 'gpt-4o-realtime-preview', + }, + }); + + rt.send({ + type: 'conversation.item.create', + item: { + type: 'message', + role: 'user', + content: [{ type: 'input_text', text: 'Say a couple paragraphs!' 
}],
+    },
+  });
+
+  rt.send({ type: 'response.create' });
+});
+
+rt.on('error', (err) => {
+  // in a real world scenario this should be logged somewhere as you
+  // likely want to continue processing events regardless of any errors
+  throw err;
+});
+
+rt.on('session.created', (event) => {
+  console.log('session created!', event.session);
+  console.log();
+});
+
+rt.on('response.text.delta', (event) => process.stdout.write(event.delta));
+rt.on('response.text.done', () => console.log());
+
+rt.on('response.done', () => rt.close());
+
+rt.socket.on('close', () => console.log('\nConnection closed!'));
+```
+
+To use the web API `WebSocket` implementation, replace `OpenAIRealtimeWS` with `OpenAIRealtimeWebSocket` and adjust any `rt.socket` access:
+
+```ts
+import { OpenAIRealtimeWebSocket } from 'openai/beta/realtime/websocket';
+
+const rt = new OpenAIRealtimeWebSocket({ model: 'gpt-4o-realtime-preview-2024-12-17' });
+// ...
+rt.socket.addEventListener('open', () => {
+  // ...
+});
+```
+
+A full example can be found [here](https://github.com/openai/openai-node/blob/master/examples/realtime/websocket.ts).
+
+### Realtime error handling
+
+When an error is encountered, either on the client side or returned from the server through the [`error` event](https://platform.openai.com/docs/guides/realtime-model-capabilities#error-handling), the `error` event listener will be fired. However, if you haven't registered an `error` event listener then an `unhandled Promise rejection` error will be thrown.
+
+It is **highly recommended** that you register an `error` event listener and handle errors appropriately, as the underlying connection is typically still usable.
+
+```ts
+const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview-2024-12-17' });
+rt.on('error', (err) => {
+  // in a real world scenario this should be logged somewhere as you
+  // likely want to continue processing events regardless of any errors
+  throw err;
+});
+```
\ No newline at end of file
diff --git a/release-please-config.json b/release-please-config.json
index 0a9347796..1aa2fb613 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -63,6 +63,10 @@
   "extra-files": [
     "src/version.ts",
     "README.md",
-    "scripts/build-deno"
+    {
+      "type": "json",
+      "path": "jsr.json",
+      "jsonpath": "$.version"
+    }
   ]
 }
diff --git a/scripts/bootstrap b/scripts/bootstrap
index 033156d3a..f107c3a24 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,7 +4,7 @@ set -e
 
 cd "$(dirname "$0")/.."
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ]; then brew bundle check >/dev/null 2>&1 || { echo "==> Installing Homebrew dependencies…" brew bundle diff --git a/scripts/build b/scripts/build index b4d686af5..4e86f99e2 100755 --- a/scripts/build +++ b/scripts/build @@ -32,7 +32,7 @@ npm exec tsc-multi # copy over handwritten .js/.mjs/.d.ts files cp src/_shims/*.{d.ts,js,mjs,md} dist/_shims cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto -# we need to add exports = module.exports = OpenAI Node to index.js; +# we need to add exports = module.exports = OpenAI to index.js; # No way to get that from index.ts because it would cause compile errors # when building .mjs node scripts/utils/fix-index-exports.cjs @@ -50,7 +50,7 @@ node scripts/utils/postprocess-files.cjs (cd dist && node -e 'require("openai")') (cd dist && node -e 'import("openai")' --input-type=module) -if [ "${OPENAI_DISABLE_DENO_BUILD:-0}" != "1" ] && command -v deno &> /dev/null && [ -e ./scripts/build-deno ] +if [ "${OPENAI_DISABLE_DENO_BUILD:-0}" != "1" ] && [ -e ./scripts/build-deno ] then ./scripts/build-deno fi diff --git a/scripts/build-deno b/scripts/build-deno index f59404dbc..bce31078e 100755 --- a/scripts/build-deno +++ b/scripts/build-deno @@ -4,47 +4,26 @@ set -exuo pipefail cd "$(dirname "$0")/.." -rm -rf deno; mkdir deno -cp -rp src/* deno +rm -rf dist-deno; mkdir dist-deno +cp -rp src/* jsr.json dist-deno -# x-release-please-start-version -cat << EOF > deno/README.md -# OpenAI Node API Library - Deno build +rm -rf dist-deno/shims -This is a build produced from https://github.com/openai/openai-node – please go there to read the source and docs, file issues, etc. - -Usage: - -\`\`\`ts -import OpenAI from "https://deno.land/x/openai@v4.67.3/mod.ts"; - -const client = new OpenAI(); -\`\`\` - -Note that in most Deno environments, you can also do this: - -\`\`\`ts -import OpenAI from "npm:openai"; -\`\`\` -EOF -# x-release-please-end +rm dist-deno/_shims/node*.{js,mjs,ts} +rm dist-deno/_shims/manual*.{js,mjs,ts} +rm dist-deno/_shims/index.{d.ts,js,mjs} +for file in dist-deno/_shims/*-deno.ts; do + mv -- "$file" "${file%-deno.ts}.ts" +done -rm deno/_shims/auto/*-node.ts -for dir in deno/_shims deno/_shims/auto; do - rm "${dir}"/*.{d.ts,js,mjs} - for file in "${dir}"/*-deno.ts; do - mv -- "$file" "${file%-deno.ts}.ts" - done +rm dist-deno/_shims/auto/*-node.ts +rm dist-deno/_shims/auto/*.{d.ts,js,mjs} +for file in dist-deno/_shims/auto/*-deno.ts; do + mv -- "$file" "${file%-deno.ts}.ts" done -for file in LICENSE CHANGELOG.md; do - if [ -e "${file}" ]; then cp "${file}" deno; fi + +for file in README.md LICENSE CHANGELOG.md; do + if [ -e "${file}" ]; then cp "${file}" dist-deno; fi done -npm exec ts-node -T -- scripts/utils/denoify.ts -deno fmt deno -deno check deno/mod.ts -if [ -e deno_tests ]; then - deno test deno_tests --allow-env -fi - -# make sure that nothing crashes when we load the Deno module -(cd deno && deno run mod.ts) + +node scripts/utils/convert-jsr-readme.cjs ./dist-deno/README.md diff --git a/scripts/git-publish-deno.sh b/scripts/git-publish-deno.sh deleted file mode 100755 index 701db735e..000000000 --- a/scripts/git-publish-deno.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash - -set -exuo pipefail - -cd "$(dirname "$0")/.." 
- -# This script pushes the contents of the `deno` directory to the `deno` branch, -# and creates a `vx.x.x-deno` tag, so that Deno users can -# import OpenAI from "https://raw.githubusercontent.com/openai/openai-node/vx.x.x-deno/mod.ts" - -# It's also possible to publish to deno.land. You can do this by: -# - Creating a separate GitHub repo -# - Add the deno.land webhook to the repo as described at https://deno.com/add_module -# - Set the following environment variables when running this script: -# - DENO_PUSH_REMOTE_URL - the remote url of the separate GitHub repo -# - DENO_PUSH_BRANCH - the branch you want to push to in that repo (probably `main`) -# - DENO_MAIN_BRANCH - the branch you want as the main branch in that repo (probably `main`, sometimes `master`) -# - DENO_PUSH_VERSION - defaults to version in package.json -# - DENO_PUSH_RELEASE_TAG - defaults to v$DENO_PUSH_VERSION-deno - -die () { - echo >&2 "$@" - exit 1 -} - -# Allow caller to set the following environment variables, but provide defaults -# if unset -# : "${FOO:=bar}" sets FOO=bar unless it's set and non-empty -# https://stackoverflow.com/questions/307503/whats-a-concise-way-to-check-that-environment-variables-are-set-in-a-unix-shell -# https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html - -: "${DENO_PUSH_VERSION:=$(node -p 'require("./package.json").version')}" -: "${DENO_PUSH_BRANCH:=deno}" -: "${DENO_MAIN_BRANCH:=main}" -: "${DENO_PUSH_REMOTE_URL:=$(git remote get-url origin)}" -: "${DENO_GIT_USER_NAME:="Stainless Bot"}" -: "${DENO_GIT_USER_EMAIL:="bot@stainlessapi.com"}" -if [[ $DENO_PUSH_BRANCH = "deno" ]]; then - : "${DENO_PUSH_RELEASE_TAG:="v$DENO_PUSH_VERSION-deno"}" -else - : "${DENO_PUSH_RELEASE_TAG:="v$DENO_PUSH_VERSION"}" -fi - -if [ ! -e deno ]; then ./scripts/build; fi - -# We want to commit and push a branch where everything inside the deno -# directory is at root level in the branch. - -# We can do this by temporarily creating a git repository inside deno, -# committing files to the branch, and pushing it to the remote. - -cd deno -rm -rf .git -git init -b "$DENO_MAIN_BRANCH" -git remote add origin "$DENO_PUSH_REMOTE_URL" -if git fetch origin "$DENO_PUSH_RELEASE_TAG"; then - die "Tag $DENO_PUSH_RELEASE_TAG already exists" -fi -if git fetch origin "$DENO_PUSH_BRANCH"; then - # the branch already exists on the remote; "check out" the branch without - # changing files in the working directory - git branch "$DENO_PUSH_BRANCH" -t origin/"$DENO_PUSH_BRANCH" - git symbolic-ref HEAD refs/heads/"$DENO_PUSH_BRANCH" - git reset -else - # the branch doesn't exist on the remote yet - git checkout -b "$DENO_PUSH_BRANCH" -fi - -git config user.email "$DENO_GIT_USER_EMAIL" -git config user.name "$DENO_GIT_USER_NAME" - -git add . -git commit -m "chore(deno): release $DENO_PUSH_VERSION" -git tag -a "$DENO_PUSH_RELEASE_TAG" -m "release $DENO_PUSH_VERSION" -git push --tags --set-upstream origin "$DENO_PUSH_BRANCH" -rm -rf .git diff --git a/scripts/utils/check-is-in-git-install.sh b/scripts/utils/check-is-in-git-install.sh index 36bcedc20..1354eb432 100755 --- a/scripts/utils/check-is-in-git-install.sh +++ b/scripts/utils/check-is-in-git-install.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Check if you happen to call prepare for a repository that's already in node_modules. 
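# (Sketch of the idea behind the checks below: a git install is checked out into
# a directory like <project>/node_modules/<name> — or into npm's cache — so when
# `prepare` runs from such a location the build should be triggered.)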
[ "$(basename "$(dirname "$PWD")")" = 'node_modules' ] || # The name of the containing directory that 'npm` uses, which looks like diff --git a/scripts/utils/convert-jsr-readme.cjs b/scripts/utils/convert-jsr-readme.cjs new file mode 100644 index 000000000..f9d089c73 --- /dev/null +++ b/scripts/utils/convert-jsr-readme.cjs @@ -0,0 +1,140 @@ +const fs = require('fs'); +const { parse } = require('@typescript-eslint/parser'); +const { TSError } = require('@typescript-eslint/typescript-estree'); + +/** + * Quick and dirty AST traversal + */ +function traverse(node, visitor) { + if (!node || typeof node.type !== 'string') return; + visitor.node?.(node); + visitor[node.type]?.(node); + for (const key in node) { + const value = node[key]; + if (Array.isArray(value)) { + for (const elem of value) traverse(elem, visitor); + } else if (value instanceof Object) { + traverse(value, visitor); + } + } +} + +/** + * Helper method for replacing arbitrary ranges of text in input code. + */ +function replaceRanges(code, replacer) { + const replacements = []; + replacer({ replace: (range, replacement) => replacements.push({ range, replacement }) }); + + if (!replacements.length) return code; + replacements.sort((a, b) => a.range[0] - b.range[0]); + const overlapIndex = replacements.findIndex( + (r, index) => index > 0 && replacements[index - 1].range[1] > r.range[0], + ); + if (overlapIndex >= 0) { + throw new Error( + `replacements overlap: ${JSON.stringify(replacements[overlapIndex - 1])} and ${JSON.stringify( + replacements[overlapIndex], + )}`, + ); + } + + const parts = []; + let end = 0; + for (const { + range: [from, to], + replacement, + } of replacements) { + if (from > end) parts.push(code.substring(end, from)); + parts.push(replacement); + end = to; + } + if (end < code.length) parts.push(code.substring(end)); + return parts.join(''); +} + +function replaceProcessEnv(content) { + // Replace process.env['KEY'] and process.env.KEY with Deno.env.get('KEY') + return content.replace(/process\.env(?:\.|\[['"])(.+?)(?:['"]\])/g, "Deno.env.get('$1')"); +} + +function replaceProcessStdout(content) { + return content.replace(/process\.stdout.write\(([^)]+)\)/g, 'Deno.stdout.writeSync($1)'); +} + +function replaceInstallationDirections(content) { + // Remove npm installation section + return content.replace(/```sh\nnpm install.*?\n```.*### Installation from JSR\n\n/s, ''); +} + +/** + * Maps over module paths in imports and exports + */ +function replaceImports(code, config) { + try { + const ast = parse(code, { sourceType: 'module', range: true }); + return replaceRanges(code, ({ replace }) => + traverse(ast, { + node(node) { + switch (node.type) { + case 'ImportDeclaration': + case 'ExportNamedDeclaration': + case 'ExportAllDeclaration': + case 'ImportExpression': + if (node.source) { + const { range, value } = node.source; + if (value.startsWith(config.npm)) { + replace(range, JSON.stringify(value.replace(config.npm, config.jsr))); + } + } + } + }, + }), + ); + } catch (e) { + if (e instanceof TSError) { + // This can error if the code block is not valid TS, in this case give up trying to transform the imports. 
+ console.warn(`Original codeblock could not be parsed, replace import skipped: ${e}\n\n${code}`); + return code; + } + throw e; + } +} + +function processReadme(config, file) { + try { + let readmeContent = fs.readFileSync(file, 'utf8'); + + // First replace installation directions + readmeContent = replaceInstallationDirections(readmeContent); + + // Replace content in all code blocks with a single regex + readmeContent = readmeContent.replaceAll( + /```(?:typescript|ts|javascript|js)\n([\s\S]*?)```/g, + (match, codeBlock) => { + try { + let transformedCode = codeBlock.trim(); + transformedCode = replaceImports(transformedCode, config); + transformedCode = replaceProcessEnv(transformedCode); + transformedCode = replaceProcessStdout(transformedCode); + return '```typescript\n' + transformedCode + '\n```'; + } catch (error) { + console.warn(`Failed to transform code block: ${error}\n\n${codeBlock}`); + return match; // Return original code block if transformation fails + } + }, + ); + + fs.writeFileSync(file, readmeContent); + } catch (error) { + console.error('Error processing README:', error); + throw error; + } +} + +const config = { + npm: 'openai', + jsr: '@openai/openai', +}; + +processReadme(config, process.argv[2]); diff --git a/scripts/utils/denoify.ts b/scripts/utils/denoify.ts deleted file mode 100644 index 52705802a..000000000 --- a/scripts/utils/denoify.ts +++ /dev/null @@ -1,226 +0,0 @@ -import path from 'path'; -import * as tm from 'ts-morph'; -import { name as pkgName } from '../../package.json'; -import fs from 'fs'; - -const rootDir = path.resolve(__dirname, '../..'); -const denoDir = path.join(rootDir, 'deno'); -const tsConfigFilePath = path.join(rootDir, 'tsconfig.deno.json'); - -async function denoify() { - const project = new tm.Project({ tsConfigFilePath }); - - for (const file of project.getSourceFiles()) { - if (!file.getFilePath().startsWith(denoDir + '/')) continue; - - let addedBuffer = false, - addedProcess = false; - file.forEachDescendant((node) => { - switch (node.getKind()) { - case tm.ts.SyntaxKind.ExportDeclaration: { - const decl: tm.ExportDeclaration = node as any; - if (decl.isTypeOnly()) return; - for (const named of decl.getNamedExports()) { - // Convert `export { Foo } from './foo.ts'` - // to `export { type Foo } from './foo.ts'` - // if `./foo.ts` only exports types for `Foo` - if (!named.isTypeOnly() && !hasValueDeclarations(named)) { - named.replaceWithText(`type ${named.getText()}`); - } - } - break; - } - case tm.ts.SyntaxKind.ImportEqualsDeclaration: { - const decl: tm.ImportEqualsDeclaration = node as any; - if (decl.isTypeOnly()) return; - - const ref = decl.getModuleReference(); - if (!hasValueDeclarations(ref)) { - const params = isBuiltinType(ref.getType()) ? 
[] : ref.getType().getTypeArguments(); - if (params.length) { - const paramsStr = params.map((p: tm.TypeParameter) => p.getText()).join(', '); - const bindingsStr = params - .map((p: tm.TypeParameter) => p.getSymbol()?.getName() || p.getText()) - .join(', '); - decl.replaceWithText( - `export type ${decl.getName()}<${paramsStr}> = ${ref.getText()}<${bindingsStr}>`, - ); - } else { - decl.replaceWithText(`export type ${decl.getName()} = ${ref.getText()}`); - } - } - break; - } - case tm.ts.SyntaxKind.Identifier: { - const id = node as tm.Identifier; - if (!addedBuffer && id.getText() === 'Buffer') { - addedBuffer = true; - file?.addVariableStatement({ - declarations: [ - { - name: 'Buffer', - type: 'any', - }, - ], - hasDeclareKeyword: true, - }); - file?.addTypeAlias({ - name: 'Buffer', - type: 'any', - }); - } - if (!addedProcess && id.getText() === 'process') { - addedProcess = true; - file?.addVariableStatement({ - declarations: [ - { - name: 'process', - type: 'any', - }, - ], - hasDeclareKeyword: true, - }); - } - } - } - }); - } - - await project.save(); - - for (const file of project.getSourceFiles()) { - if (!file.getFilePath().startsWith(denoDir + '/')) continue; - for (const decl of [...file.getImportDeclarations(), ...file.getExportDeclarations()]) { - const moduleSpecifier = decl.getModuleSpecifier(); - if (!moduleSpecifier) continue; - let specifier = moduleSpecifier.getLiteralValue().replace(/^node:/, ''); - if (!specifier || specifier.startsWith('http')) continue; - - if (nodeStdModules.has(specifier)) { - // convert node builtins to deno.land/std - specifier = `https://deno.land/std@0.177.0/node/${specifier}.ts`; - } else if (specifier.startsWith(pkgName + '/')) { - // convert self-referencing module specifiers to relative paths - specifier = file.getRelativePathAsModuleSpecifierTo(denoDir + specifier.substring(pkgName.length)); - } else if (!decl.isModuleSpecifierRelative()) { - specifier = `npm:${specifier}`; - } - - if (specifier.startsWith('./') || specifier.startsWith('../')) { - // there may be CJS directory module specifiers that implicitly resolve - // to /index.ts. 
Add an explicit /index.ts to the end - const sourceFile = decl.getModuleSpecifierSourceFile(); - if (sourceFile && /\/index\.ts$/.test(sourceFile.getFilePath()) && !/\/mod\.ts$/.test(specifier)) { - if (/\/index(\.ts)?$/.test(specifier)) { - specifier = specifier.replace(/\/index(\.ts)?$/, '/mod.ts'); - } else { - specifier += '/mod.ts'; - } - } - // add explicit .ts file extensions to relative module specifiers - specifier = specifier.replace(/(\.[^./]*)?$/, '.ts'); - } - moduleSpecifier.replaceWithText(JSON.stringify(specifier)); - } - } - - await project.save(); - - await Promise.all( - project.getSourceFiles().map(async (f) => { - const filePath = f.getFilePath(); - if (filePath.endsWith('index.ts')) { - const newPath = filePath.replace(/index\.ts$/, 'mod.ts'); - await fs.promises.rename(filePath, newPath); - } - }), - ); -} - -const nodeStdModules = new Set([ - 'assert', - 'assertion_error', - 'async_hooks', - 'buffer', - 'child_process', - 'cluster', - 'console', - 'constants', - 'crypto', - 'dgram', - 'diagnostics_channel', - 'dns', - 'domain', - 'events', - 'fs', - 'global', - 'http', - 'http2', - 'https', - 'inspector', - 'module_all', - 'module_esm', - 'module', - 'net', - 'os', - 'path', - 'perf_hooks', - 'process', - 'punycode', - 'querystring', - 'readline', - 'repl', - 'stream', - 'string_decoder', - 'sys', - 'timers', - 'tls', - 'tty', - 'upstream_modules', - 'url', - 'util', - 'v8', - 'vm', - 'wasi', - 'worker_threads', - 'zlib', -]); - -const typeDeclarationKinds = new Set([ - tm.ts.SyntaxKind.InterfaceDeclaration, - tm.ts.SyntaxKind.ModuleDeclaration, - tm.ts.SyntaxKind.TypeAliasDeclaration, -]); - -const builtinTypeNames = new Set(['Array', 'Set', 'Map', 'Record', 'Promise']); - -function isBuiltinType(type: tm.Type): boolean { - const symbol = type.getSymbol(); - return ( - symbol != null && - builtinTypeNames.has(symbol.getName()) && - symbol.getDeclarations().some((d) => d.getSourceFile().getFilePath().includes('node_modules/typescript')) - ); -} - -function hasValueDeclarations(nodes?: tm.Node): boolean; -function hasValueDeclarations(nodes?: tm.Node[]): boolean; -function hasValueDeclarations(nodes?: tm.Node | tm.Node[]): boolean { - if (nodes && !Array.isArray(nodes)) { - return ( - !isBuiltinType(nodes.getType()) && hasValueDeclarations(nodes.getType().getSymbol()?.getDeclarations()) - ); - } - return nodes ? - nodes.some((n) => { - const parent = n.getParent(); - return ( - !typeDeclarationKinds.has(n.getKind()) && - // sometimes the node will be the right hand side of a type alias - (!parent || !typeDeclarationKinds.has(parent.getKind())) - ); - }) - : false; -} - -denoify(); diff --git a/scripts/utils/git-swap.sh b/scripts/utils/git-swap.sh new file mode 100755 index 000000000..79d1888eb --- /dev/null +++ b/scripts/utils/git-swap.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -exuo pipefail +# the package is published to NPM from ./dist +# we want the final file structure for git installs to match the npm installs, so we + +# delete everything except ./dist and ./node_modules +find . -maxdepth 1 -mindepth 1 ! -name 'dist' ! -name 'node_modules' -exec rm -rf '{}' + + +# move everything from ./dist to . +mv dist/* . 
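
The script's final step, removing the now-empty `./dist`, closes the file just below. Since the trick is easy to get wrong, here is a hedged scratch-directory sketch (hypothetical file names) of what the swap leaves behind:

```bash
# scratch demo of the swap performed by git-swap.sh (hypothetical layout)
mkdir -p /tmp/pkg/dist && cd /tmp/pkg
touch package.json yarn.lock dist/index.js dist/index.d.ts

# drop everything except ./dist and ./node_modules (same find invocation as above)
find . -maxdepth 1 -mindepth 1 ! -name 'dist' ! -name 'node_modules' -exec rm -rf '{}' +

# hoist the build output to the root, then remove the emptied directory
mv dist/* . && rmdir dist

ls  # -> index.d.ts  index.js   (a git install now mirrors the npm tarball layout)
```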
+ +# delete the now-empty ./dist +rmdir dist diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 000000000..0e8490199 --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -exuo pipefail + +RESPONSE=$(curl -X POST "$URL" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(tar -cz dist | curl -v -X PUT \ + -H "Content-Type: application/gzip" \ + --data-binary @- "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/openai-node/$SHA'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi diff --git a/src/_shims/index-deno.ts b/src/_shims/index-deno.ts index d9eabb5a9..e83c7a6d3 100644 --- a/src/_shims/index-deno.ts +++ b/src/_shims/index-deno.ts @@ -108,3 +108,5 @@ export declare class FsReadStream extends Readable { const _ReadableStream = ReadableStream; type _ReadableStream = ReadableStream; export { _ReadableStream as ReadableStream }; + +export const init = () => {}; diff --git a/src/_shims/index.d.ts b/src/_shims/index.d.ts index d867b293b..107cf7fd6 100644 --- a/src/_shims/index.d.ts +++ b/src/_shims/index.d.ts @@ -79,3 +79,5 @@ export function fileFromPath(path: string, options?: FileFromPathOptions): Promi export function fileFromPath(path: string, filename?: string, options?: FileFromPathOptions): Promise; export function isFsReadStream(value: any): value is FsReadStream; + +export const init: () => void; diff --git a/src/_shims/index.js b/src/_shims/index.js index b5fc8229e..959f2b9ce 100644 --- a/src/_shims/index.js +++ b/src/_shims/index.js @@ -3,7 +3,9 @@ */ const shims = require('./registry'); const auto = require('openai/_shims/auto/runtime'); -if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +exports.init = () => { + if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +}; for (const property of Object.keys(shims)) { Object.defineProperty(exports, property, { get() { @@ -11,3 +13,5 @@ for (const property of Object.keys(shims)) { }, }); } + +exports.init(); diff --git a/src/_shims/index.mjs b/src/_shims/index.mjs index 81665e610..26d7a716c 100644 --- a/src/_shims/index.mjs +++ b/src/_shims/index.mjs @@ -3,5 +3,9 @@ */ import * as shims from './registry.mjs'; import * as auto from 'openai/_shims/auto/runtime'; -if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +export const init = () => { + if (!shims.kind) shims.setShims(auto.getRuntime(), { auto: true }); +}; export * from './registry.mjs'; + +init(); diff --git a/src/_vendor/zod-to-json-schema/parsers/object.ts b/src/_vendor/zod-to-json-schema/parsers/object.ts index f2120c8fe..25e5db116 100644 --- a/src/_vendor/zod-to-json-schema/parsers/object.ts +++ b/src/_vendor/zod-to-json-schema/parsers/object.ts @@ -39,12 +39,20 @@ export function parseObjectDef(def: ZodObjectDef, refs: Refs) { [propName, propDef], ) => { if (propDef === undefined || propDef._def === undefined) return acc; + const propertyPath = [...refs.currentPath, 'properties', propName]; const parsedDef = parseDef(propDef._def, { ...refs, - currentPath: [...refs.currentPath, 'properties', propName], - propertyPath: 
[...refs.currentPath, 'properties', propName], + currentPath: propertyPath, + propertyPath, }); if (parsedDef === undefined) return acc; + if (refs.openaiStrictMode && propDef.isOptional() && !propDef.isNullable()) { + console.warn( + `Zod field at \`${propertyPath.join( + '/', + )}\` uses \`.optional()\` without \`.nullable()\` which is not supported by the API. See: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required\nThis will become an error in a future version of the SDK.`, + ); + } return { properties: { ...acc.properties, diff --git a/src/beta/realtime/index.ts b/src/beta/realtime/index.ts new file mode 100644 index 000000000..75f0f3088 --- /dev/null +++ b/src/beta/realtime/index.ts @@ -0,0 +1 @@ +export { OpenAIRealtimeError } from './internal-base'; diff --git a/src/beta/realtime/internal-base.ts b/src/beta/realtime/internal-base.ts new file mode 100644 index 000000000..b704812ee --- /dev/null +++ b/src/beta/realtime/internal-base.ts @@ -0,0 +1,93 @@ +import { RealtimeClientEvent, RealtimeServerEvent, ErrorEvent } from '../../resources/beta/realtime/realtime'; +import { EventEmitter } from '../../lib/EventEmitter'; +import { OpenAIError } from '../../error'; +import OpenAI, { AzureOpenAI } from '../../index'; + +export class OpenAIRealtimeError extends OpenAIError { + /** + * The error data that the API sent back in an `error` event. + */ + error?: ErrorEvent.Error | undefined; + + /** + * The unique ID of the server event. + */ + event_id?: string | undefined; + + constructor(message: string, event: ErrorEvent | null) { + super(message); + + this.error = event?.error; + this.event_id = event?.event_id; + } +} + +type Simplify = { [KeyType in keyof T]: T[KeyType] } & {}; + +type RealtimeEvents = Simplify< + { + event: (event: RealtimeServerEvent) => void; + error: (error: OpenAIRealtimeError) => void; + } & { + [EventType in Exclude]: ( + event: Extract, + ) => unknown; + } +>; + +export abstract class OpenAIRealtimeEmitter extends EventEmitter { + /** + * Send an event to the API. + */ + abstract send(event: RealtimeClientEvent): void; + + /** + * Close the websocket connection. + */ + abstract close(props?: { code: number; reason: string }): void; + + protected _onError(event: null, message: string, cause: any): void; + protected _onError(event: ErrorEvent, message?: string | undefined): void; + protected _onError(event: ErrorEvent | null, message?: string | undefined, cause?: any): void { + message = + event?.error ? + `${event.error.message} code=${event.error.code} param=${event.error.param} type=${event.error.type} event_id=${event.error.event_id}` + : message ?? 'unknown error'; + + if (!this._hasListener('error')) { + const error = new OpenAIRealtimeError( + message + + `\n\nTo resolve these unhandled rejection errors you should bind an \`error\` callback, e.g. \`rt.on('error', (error) => ...)\` `, + event, + ); + // @ts-ignore + error.cause = cause; + Promise.reject(error); + return; + } + + const error = new OpenAIRealtimeError(message, event); + // @ts-ignore + error.cause = cause; + + this._emit('error', error); + } +} + +export function isAzure(client: Pick): client is AzureOpenAI { + return client instanceof AzureOpenAI; +} + +export function buildRealtimeURL(client: Pick, model: string): URL { + const path = '/realtime'; + const baseURL = client.baseURL; + const url = new URL(baseURL + (baseURL.endsWith('/') ? 
path.slice(1) : path)); + url.protocol = 'wss'; + if (isAzure(client)) { + url.searchParams.set('api-version', client.apiVersion); + url.searchParams.set('deployment', model); + } else { + url.searchParams.set('model', model); + } + return url; +} diff --git a/src/beta/realtime/websocket.ts b/src/beta/realtime/websocket.ts new file mode 100644 index 000000000..e8900e809 --- /dev/null +++ b/src/beta/realtime/websocket.ts @@ -0,0 +1,143 @@ +import { AzureOpenAI, OpenAI } from '../../index'; +import { OpenAIError } from '../../error'; +import * as Core from '../../core'; +import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; +import { OpenAIRealtimeEmitter, buildRealtimeURL, isAzure } from './internal-base'; + +interface MessageEvent { + data: string; +} + +type _WebSocket = + typeof globalThis extends ( + { + WebSocket: infer ws extends abstract new (...args: any) => any; + } + ) ? + // @ts-ignore + InstanceType + : any; + +export class OpenAIRealtimeWebSocket extends OpenAIRealtimeEmitter { + url: URL; + socket: _WebSocket; + + constructor( + props: { + model: string; + dangerouslyAllowBrowser?: boolean; + /** + * Callback to mutate the URL, needed for Azure. + * @internal + */ + onURL?: (url: URL) => void; + }, + client?: Pick, + ) { + super(); + + const dangerouslyAllowBrowser = + props.dangerouslyAllowBrowser ?? + (client as any)?._options?.dangerouslyAllowBrowser ?? + (client?.apiKey.startsWith('ek_') ? true : null); + + if (!dangerouslyAllowBrowser && Core.isRunningInBrowser()) { + throw new OpenAIError( + "It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\n\nYou can avoid this error by creating an ephemeral session token:\nhttps://platform.openai.com/docs/api-reference/realtime-sessions\n", + ); + } + + client ??= new OpenAI({ dangerouslyAllowBrowser }); + + this.url = buildRealtimeURL(client, props.model); + props.onURL?.(this.url); + + // @ts-ignore + this.socket = new WebSocket(this.url.toString(), [ + 'realtime', + ...(isAzure(client) ? [] : [`openai-insecure-api-key.${client.apiKey}`]), + 'openai-beta.realtime-v1', + ]); + + this.socket.addEventListener('message', (websocketEvent: MessageEvent) => { + const event = (() => { + try { + return JSON.parse(websocketEvent.data.toString()) as RealtimeServerEvent; + } catch (err) { + this._onError(null, 'could not parse websocket event', err); + return null; + } + })(); + + if (event) { + this._emit('event', event); + + if (event.type === 'error') { + this._onError(event); + } else { + // @ts-expect-error TS isn't smart enough to get the relationship right here + this._emit(event.type, event); + } + } + }); + + this.socket.addEventListener('error', (event: any) => { + this._onError(null, event.message, null); + }); + + if (isAzure(client)) { + if (this.url.searchParams.get('Authorization') !== null) { + this.url.searchParams.set('Authorization', ''); + } else { + this.url.searchParams.set('api-key', ''); + } + } + } + + static async azure( + client: Pick, + options: { deploymentName?: string; dangerouslyAllowBrowser?: boolean } = {}, + ): Promise { + const token = await client._getAzureADToken(); + function onURL(url: URL) { + if (client.apiKey !== '') { + url.searchParams.set('api-key', client.apiKey); + } else { + if (token) { + url.searchParams.set('Authorization', `Bearer ${token}`); + } else { + throw new Error('AzureOpenAI is not instantiated correctly. 
No API key or token provided.'); + } + } + } + const deploymentName = options.deploymentName ?? client.deploymentName; + if (!deploymentName) { + throw new Error('No deployment name provided'); + } + const { dangerouslyAllowBrowser } = options; + return new OpenAIRealtimeWebSocket( + { + model: deploymentName, + onURL, + ...(dangerouslyAllowBrowser ? { dangerouslyAllowBrowser } : {}), + }, + client, + ); + } + + send(event: RealtimeClientEvent) { + try { + this.socket.send(JSON.stringify(event)); + } catch (err) { + this._onError(null, 'could not send data', err); + } + } + + close(props?: { code: number; reason: string }) { + try { + this.socket.close(props?.code ?? 1000, props?.reason ?? 'OK'); + } catch (err) { + this._onError(null, 'could not close the connection', err); + } + } +} diff --git a/src/beta/realtime/ws.ts b/src/beta/realtime/ws.ts new file mode 100644 index 000000000..3f51dfc4b --- /dev/null +++ b/src/beta/realtime/ws.ts @@ -0,0 +1,96 @@ +import * as WS from 'ws'; +import { AzureOpenAI, OpenAI } from '../../index'; +import type { RealtimeClientEvent, RealtimeServerEvent } from '../../resources/beta/realtime/realtime'; +import { OpenAIRealtimeEmitter, buildRealtimeURL, isAzure } from './internal-base'; + +export class OpenAIRealtimeWS extends OpenAIRealtimeEmitter { + url: URL; + socket: WS.WebSocket; + + constructor( + props: { model: string; options?: WS.ClientOptions | undefined }, + client?: Pick, + ) { + super(); + client ??= new OpenAI(); + + this.url = buildRealtimeURL(client, props.model); + this.socket = new WS.WebSocket(this.url, { + ...props.options, + headers: { + ...props.options?.headers, + ...(isAzure(client) ? {} : { Authorization: `Bearer ${client.apiKey}` }), + 'OpenAI-Beta': 'realtime=v1', + }, + }); + + this.socket.on('message', (wsEvent) => { + const event = (() => { + try { + return JSON.parse(wsEvent.toString()) as RealtimeServerEvent; + } catch (err) { + this._onError(null, 'could not parse websocket event', err); + return null; + } + })(); + + if (event) { + this._emit('event', event); + + if (event.type === 'error') { + this._onError(event); + } else { + // @ts-expect-error TS isn't smart enough to get the relationship right here + this._emit(event.type, event); + } + } + }); + + this.socket.on('error', (err) => { + this._onError(null, err.message, err); + }); + } + + static async azure( + client: Pick, + options: { deploymentName?: string; options?: WS.ClientOptions | undefined } = {}, + ): Promise { + const deploymentName = options.deploymentName ?? client.deploymentName; + if (!deploymentName) { + throw new Error('No deployment name provided'); + } + return new OpenAIRealtimeWS( + { model: deploymentName, options: { headers: await getAzureHeaders(client) } }, + client, + ); + } + + send(event: RealtimeClientEvent) { + try { + this.socket.send(JSON.stringify(event)); + } catch (err) { + this._onError(null, 'could not send data', err); + } + } + + close(props?: { code: number; reason: string }) { + try { + this.socket.close(props?.code ?? 1000, props?.reason ?? 'OK'); + } catch (err) { + this._onError(null, 'could not close the connection', err); + } + } +} + +async function getAzureHeaders(client: Pick) { + if (client.apiKey !== '') { + return { 'api-key': client.apiKey }; + } else { + const token = await client._getAzureADToken(); + if (token) { + return { Authorization: `Bearer ${token}` }; + } else { + throw new Error('AzureOpenAI is not instantiated correctly. 
No API key or token provided.'); + } + } +} diff --git a/src/core.ts b/src/core.ts index d78e9e926..cfd4eeaa6 100644 --- a/src/core.ts +++ b/src/core.ts @@ -17,7 +17,12 @@ import { type RequestInit, type Response, type HeadersInit, + init, } from './_shims/index'; + +// try running side effects outside of _shims/index to workaround https://github.com/vercel/next.js/issues/76881 +init(); + export { type Response }; import { BlobLike, isBlobLike, isMultipartBody } from './uploads'; export { @@ -29,6 +34,20 @@ export { export type Fetch = (url: RequestInfo, init?: RequestInit) => Promise; +/** + * An alias to the builtin `Array` type so we can + * easily alias it in import statements if there are name clashes. + */ +type _Array = Array; + +/** + * An alias to the builtin `Record` type so we can + * easily alias it in import statements if there are name clashes. + */ +type _Record = Record; + +export type { _Array as Array, _Record as Record }; + type PromiseOrValue = T | Promise; type APIResponseProps = { @@ -62,8 +81,8 @@ async function defaultParseResponse(props: APIResponseProps): Promise( - options: FinalRequestOptions, + inputOptions: FinalRequestOptions, { retryCount = 0 }: { retryCount?: number } = {}, ): { req: RequestInit; url: string; timeout: number } { + const options = { ...inputOptions }; const { method, path, query, headers: headers = {} } = options; const body = @@ -327,9 +347,9 @@ export abstract class APIClient { const url = this.buildURL(path!, query); if ('timeout' in options) validatePositiveInteger('timeout', options.timeout); - const timeout = options.timeout ?? this.timeout; + options.timeout = options.timeout ?? this.timeout; const httpAgent = options.httpAgent ?? this.httpAgent ?? getDefaultAgent(url); - const minAgentTimeout = timeout + 1000; + const minAgentTimeout = options.timeout + 1000; if ( typeof (httpAgent as any)?.options?.timeout === 'number' && minAgentTimeout > ((httpAgent as any).options.timeout ?? 0) @@ -342,8 +362,8 @@ export abstract class APIClient { } if (this.idempotencyHeader && method !== 'get') { - if (!options.idempotencyKey) options.idempotencyKey = this.defaultIdempotencyKey(); - headers[this.idempotencyHeader] = options.idempotencyKey; + if (!inputOptions.idempotencyKey) inputOptions.idempotencyKey = this.defaultIdempotencyKey(); + headers[this.idempotencyHeader] = inputOptions.idempotencyKey; } const reqHeaders = this.buildHeaders({ options, headers, contentLength, retryCount }); @@ -358,7 +378,7 @@ export abstract class APIClient { signal: options.signal ?? null, }; - return { req, url, timeout }; + return { req, url, timeout: options.timeout }; } private buildHeaders({ @@ -386,11 +406,22 @@ export abstract class APIClient { delete reqHeaders['content-type']; } - // Don't set the retry count header if it was already set or removed by the caller. We check `headers`, - // which can contain nulls, instead of `reqHeaders` to account for the removal case. - if (getHeader(headers, 'x-stainless-retry-count') === undefined) { + // Don't set theses headers if they were already set or removed through default headers or by the caller. + // We check `defaultHeaders` and `headers`, which can contain nulls, instead of `reqHeaders` to account + // for the removal case. 
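
Taken together, the `ws.ts` hunks above give Node users a realtime client backed by the `ws` package. A minimal usage sketch, assuming `OPENAI_API_KEY` is set; the model name is a placeholder, and the event payloads follow the `RealtimeClientEvent`/`RealtimeServerEvent` unions:

```ts
import { OpenAIRealtimeWS } from 'openai/beta/realtime/ws';

const rt = new OpenAIRealtimeWS({ model: 'gpt-4o-realtime-preview' });

// the underlying WS.WebSocket is exposed directly as `rt.socket`
rt.socket.on('open', () => {
  rt.send({ type: 'session.update', session: { modalities: ['text'] } });
});

// without an 'error' listener, _onError() above turns failures into
// unhandled promise rejections, so binding one is strongly advised
rt.on('error', (err) => console.error(err.message, err.event_id));
rt.on('event', (event) => console.log('server event:', event.type));
```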
+ if ( + getHeader(defaultHeaders, 'x-stainless-retry-count') === undefined && + getHeader(headers, 'x-stainless-retry-count') === undefined + ) { reqHeaders['x-stainless-retry-count'] = String(retryCount); } + if ( + getHeader(defaultHeaders, 'x-stainless-timeout') === undefined && + getHeader(headers, 'x-stainless-timeout') === undefined && + options.timeout + ) { + reqHeaders['x-stainless-timeout'] = String(Math.trunc(options.timeout / 1000)); + } this.validateHeaders(reqHeaders, headers); @@ -418,7 +449,7 @@ export abstract class APIClient { !headers ? {} : Symbol.iterator in headers ? Object.fromEntries(Array.from(headers as Iterable).map((header) => [...header])) - : { ...headers } + : { ...(headers as any as Record) } ); } @@ -427,7 +458,7 @@ export abstract class APIClient { error: Object | undefined, message: string | undefined, headers: Headers | undefined, - ) { + ): APIError { return APIError.generate(status, error, message, headers); } @@ -553,20 +584,24 @@ export abstract class APIClient { const timeout = setTimeout(() => controller.abort(), ms); + const fetchOptions = { + signal: controller.signal as any, + ...options, + }; + if (fetchOptions.method) { + // Custom methods like 'patch' need to be uppercased + // See https://github.com/nodejs/undici/issues/2294 + fetchOptions.method = fetchOptions.method.toUpperCase(); + } + return ( - this.getRequestClient() - // use undefined this binding; fetch errors if bound to something else in browser/cloudflare - .fetch.call(undefined, url, { signal: controller.signal as any, ...options }) - .finally(() => { - clearTimeout(timeout); - }) + // use undefined this binding; fetch errors if bound to something else in browser/cloudflare + this.fetch.call(undefined, url, fetchOptions).finally(() => { + clearTimeout(timeout); + }) ); } - protected getRequestClient(): RequestClient { - return { fetch: this.fetch }; - } - private shouldRetry(response: Response): boolean { // Note this is not a standard header. 
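
One observable effect of the header guards in this hunk: the effective request timeout is now advertised to the server, in whole seconds, as `x-stainless-timeout`, and both it and `x-stainless-retry-count` can be suppressed through default or per-request headers. A hedged sketch:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// this request should carry `x-stainless-timeout: 30` (Math.trunc(30_000 / 1000))
await client.chat.completions.create(
  { model: 'gpt-4o-mini', messages: [{ role: 'user', content: 'ping' }] },
  { timeout: 30_000 },
);

// opting out per request: a `null` value makes `getHeader(...)` return
// non-`undefined`, so the client skips setting the header, and the
// `null` entry itself is stripped before sending
await client.models.list({ headers: { 'x-stainless-timeout': null } });
```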
const shouldRetryHeader = response.headers.get('x-should-retry'); @@ -699,9 +734,9 @@ export abstract class AbstractPage implements AsyncIterable { return await this.#client.requestAPIList(this.constructor as any, nextOptions); } - async *iterPages() { + async *iterPages(): AsyncGenerator { // eslint-disable-next-line @typescript-eslint/no-this-alias - let page: AbstractPage = this; + let page: this = this; yield page; while (page.hasNextPage()) { page = await page.getNextPage(); @@ -709,7 +744,7 @@ export abstract class AbstractPage implements AsyncIterable { } } - async *[Symbol.asyncIterator]() { + async *[Symbol.asyncIterator](): AsyncGenerator { for await (const page of this.iterPages()) { for (const item of page.getPaginatedItems()) { yield item; @@ -758,7 +793,7 @@ export class PagePromise< * console.log(item) * } */ - async *[Symbol.asyncIterator]() { + async *[Symbol.asyncIterator](): AsyncGenerator { const page = await this; for await (const item of page) { yield item; @@ -806,6 +841,7 @@ export type RequestOptions< signal?: AbortSignal | undefined | null; idempotencyKey?: string; + __metadata?: Record; __binaryRequest?: boolean | undefined; __binaryResponse?: boolean | undefined; __streamClass?: typeof Stream; @@ -828,6 +864,7 @@ const requestOptionsKeys: KeysEnum = { signal: true, idempotencyKey: true, + __metadata: true, __binaryRequest: true, __binaryResponse: true, __streamClass: true, @@ -1015,8 +1052,8 @@ export const safeJSON = (text: string) => { } }; -// https://stackoverflow.com/a/19709846 -const startsWithSchemeRegexp = new RegExp('^(?:[a-z]+:)?//', 'i'); +// https://url.spec.whatwg.org/#url-scheme-string +const startsWithSchemeRegexp = /^[a-z][a-z0-9+.-]*:/i; const isAbsoluteURL = (url: string): boolean => { return startsWithSchemeRegexp.test(url); }; @@ -1140,9 +1177,43 @@ function applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void { } } +const SENSITIVE_HEADERS = new Set(['authorization', 'api-key']); + export function debug(action: string, ...args: any[]) { if (typeof process !== 'undefined' && process?.env?.['DEBUG'] === 'true') { - console.log(`OpenAI:DEBUG:${action}`, ...args); + const modifiedArgs = args.map((arg) => { + if (!arg) { + return arg; + } + + // Check for sensitive headers in request body 'headers' object + if (arg['headers']) { + // clone so we don't mutate + const modifiedArg = { ...arg, headers: { ...arg['headers'] } }; + + for (const header in arg['headers']) { + if (SENSITIVE_HEADERS.has(header.toLowerCase())) { + modifiedArg['headers'][header] = 'REDACTED'; + } + } + + return modifiedArg; + } + + let modifiedArg = null; + + // Check for sensitive headers in headers object + for (const header in arg) { + if (SENSITIVE_HEADERS.has(header.toLowerCase())) { + // avoid making a copy until we need to + modifiedArg ??= { ...arg }; + modifiedArg[header] = 'REDACTED'; + } + } + + return modifiedArg ?? arg; + }); + console.log(`OpenAI:DEBUG:${action}`, ...modifiedArgs); } } @@ -1230,6 +1301,30 @@ export const toBase64 = (str: string | null | undefined): string => { throw new OpenAIError('Cannot generate b64 string; Expected `Buffer` or `btoa` to be defined'); }; +/** + * Converts a Base64 encoded string to a Float32Array. + * @param base64Str - The Base64 encoded string. + * @returns An Array of numbers interpreted as Float32 values. 
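
The `toFloat32Array` helper this hunk introduces (its implementation continues just below) turns a base64 payload of little-endian float32 samples into plain numbers. A worked check, assuming the encoded samples are 1.0 and 2.0:

```ts
import { toFloat32Array } from 'openai/core';

// 'AACAPwAAAEA=' encodes the little-endian float32 bytes
// 00 00 80 3f (1.0) followed by 00 00 00 40 (2.0)
const samples = toFloat32Array('AACAPwAAAEA=');
console.log(samples); // -> [1, 2]
```

In the browser branch, the same bytes are recovered through `atob` and a manual `Uint8Array` copy before the `Float32Array` view is taken.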
+ */ +export const toFloat32Array = (base64Str: string): Array => { + if (typeof Buffer !== 'undefined') { + // for Node.js environment + const buf = Buffer.from(base64Str, 'base64'); + return Array.from( + new Float32Array(buf.buffer, buf.byteOffset, buf.length / Float32Array.BYTES_PER_ELEMENT), + ); + } else { + // for legacy web platform APIs + const binaryStr = atob(base64Str); + const len = binaryStr.length; + const bytes = new Uint8Array(len); + for (let i = 0; i < len; i++) { + bytes[i] = binaryStr.charCodeAt(i); + } + return Array.from(new Float32Array(bytes.buffer)); + } +}; + export function isObj(obj: unknown): obj is Record { return obj != null && typeof obj === 'object' && !Array.isArray(obj); } diff --git a/src/error.ts b/src/error.ts index 87eeea046..f3dc57610 100644 --- a/src/error.ts +++ b/src/error.ts @@ -4,10 +4,17 @@ import { castToError, Headers } from './core'; export class OpenAIError extends Error {} -export class APIError extends OpenAIError { - readonly status: number | undefined; - readonly headers: Headers | undefined; - readonly error: Object | undefined; +export class APIError< + TStatus extends number | undefined = number | undefined, + THeaders extends Headers | undefined = Headers | undefined, + TError extends Object | undefined = Object | undefined, +> extends OpenAIError { + /** HTTP status for the response that caused the error */ + readonly status: TStatus; + /** HTTP headers for the response that caused the error */ + readonly headers: THeaders; + /** JSON body of the response that caused the error */ + readonly error: TError; readonly code: string | null | undefined; readonly param: string | null | undefined; @@ -15,19 +22,14 @@ export class APIError extends OpenAIError { readonly request_id: string | null | undefined; - constructor( - status: number | undefined, - error: Object | undefined, - message: string | undefined, - headers: Headers | undefined, - ) { + constructor(status: TStatus, error: TError, message: string | undefined, headers: THeaders) { super(`${APIError.makeMessage(status, error, message)}`); this.status = status; this.headers = headers; this.request_id = headers?.['x-request-id']; + this.error = error; const data = error as Record; - this.error = data; this.code = data?.['code']; this.param = data?.['param']; this.type = data?.['type']; @@ -59,8 +61,8 @@ export class APIError extends OpenAIError { errorResponse: Object | undefined, message: string | undefined, headers: Headers | undefined, - ) { - if (!status) { + ): APIError { + if (!status || !headers) { return new APIConnectionError({ message, cause: castToError(errorResponse) }); } @@ -102,17 +104,13 @@ export class APIError extends OpenAIError { } } -export class APIUserAbortError extends APIError { - override readonly status: undefined = undefined; - +export class APIUserAbortError extends APIError { constructor({ message }: { message?: string } = {}) { super(undefined, undefined, message || 'Request was aborted.', undefined); } } -export class APIConnectionError extends APIError { - override readonly status: undefined = undefined; - +export class APIConnectionError extends APIError { constructor({ message, cause }: { message?: string | undefined; cause?: Error | undefined }) { super(undefined, undefined, message || 'Connection error.', undefined); // in some environments the 'cause' property is already declared @@ -127,35 +125,21 @@ export class APIConnectionTimeoutError extends APIConnectionError { } } -export class BadRequestError extends APIError { - override readonly 
status: 400 = 400; -} +export class BadRequestError extends APIError<400, Headers> {} -export class AuthenticationError extends APIError { - override readonly status: 401 = 401; -} +export class AuthenticationError extends APIError<401, Headers> {} -export class PermissionDeniedError extends APIError { - override readonly status: 403 = 403; -} +export class PermissionDeniedError extends APIError<403, Headers> {} -export class NotFoundError extends APIError { - override readonly status: 404 = 404; -} +export class NotFoundError extends APIError<404, Headers> {} -export class ConflictError extends APIError { - override readonly status: 409 = 409; -} +export class ConflictError extends APIError<409, Headers> {} -export class UnprocessableEntityError extends APIError { - override readonly status: 422 = 422; -} +export class UnprocessableEntityError extends APIError<422, Headers> {} -export class RateLimitError extends APIError { - override readonly status: 429 = 429; -} +export class RateLimitError extends APIError<429, Headers> {} -export class InternalServerError extends APIError {} +export class InternalServerError extends APIError {} export class LengthFinishReasonError extends OpenAIError { constructor() { diff --git a/src/helpers/audio.ts b/src/helpers/audio.ts new file mode 100644 index 000000000..f1a6ea371 --- /dev/null +++ b/src/helpers/audio.ts @@ -0,0 +1,145 @@ +import { File } from 'formdata-node'; +import { spawn } from 'node:child_process'; +import { Readable } from 'node:stream'; +import { platform, versions } from 'node:process'; +import { Response } from 'openai/_shims'; + +const DEFAULT_SAMPLE_RATE = 24000; +const DEFAULT_CHANNELS = 1; + +const isNode = Boolean(versions?.node); + +const recordingProviders: Record = { + win32: 'dshow', + darwin: 'avfoundation', + linux: 'alsa', + aix: 'alsa', + android: 'alsa', + freebsd: 'alsa', + haiku: 'alsa', + sunos: 'alsa', + netbsd: 'alsa', + openbsd: 'alsa', + cygwin: 'dshow', +}; + +function isResponse(stream: NodeJS.ReadableStream | Response | File): stream is Response { + return typeof (stream as any).body !== 'undefined'; +} + +function isFile(stream: NodeJS.ReadableStream | Response | File): stream is File { + return stream instanceof File; +} + +async function nodejsPlayAudio(stream: NodeJS.ReadableStream | Response | File): Promise { + return new Promise((resolve, reject) => { + try { + const ffplay = spawn('ffplay', ['-autoexit', '-nodisp', '-i', 'pipe:0']); + + if (isResponse(stream)) { + stream.body.pipe(ffplay.stdin); + } else if (isFile(stream)) { + Readable.from(stream.stream()).pipe(ffplay.stdin); + } else { + stream.pipe(ffplay.stdin); + } + + ffplay.on('close', (code: number) => { + if (code !== 0) { + reject(new Error(`ffplay process exited with code ${code}`)); + } + resolve(); + }); + } catch (error) { + reject(error); + } + }); +} + +export async function playAudio(input: NodeJS.ReadableStream | Response | File): Promise { + if (isNode) { + return nodejsPlayAudio(input); + } + + throw new Error( + 'Play audio is not supported in the browser yet. Check out https://npm.im/wavtools as an alternative.', + ); +} + +type RecordAudioOptions = { + signal?: AbortSignal; + device?: number; + timeout?: number; +}; + +function nodejsRecordAudio({ signal, device, timeout }: RecordAudioOptions = {}): Promise { + return new Promise((resolve, reject) => { + const data: any[] = []; + const provider = recordingProviders[platform]; + try { + const ffmpeg = spawn( + 'ffmpeg', + [ + '-f', + provider, + '-i', + `:${device ?? 
0}`, // default audio input device; adjust as needed + '-ar', + DEFAULT_SAMPLE_RATE.toString(), + '-ac', + DEFAULT_CHANNELS.toString(), + '-f', + 'wav', + 'pipe:1', + ], + { + stdio: ['ignore', 'pipe', 'pipe'], + }, + ); + + ffmpeg.stdout.on('data', (chunk) => { + data.push(chunk); + }); + + ffmpeg.on('error', (error) => { + console.error(error); + reject(error); + }); + + ffmpeg.on('close', (code) => { + returnData(); + }); + + function returnData() { + const audioBuffer = Buffer.concat(data); + const audioFile = new File([audioBuffer], 'audio.wav', { type: 'audio/wav' }); + resolve(audioFile); + } + + if (typeof timeout === 'number' && timeout > 0) { + const internalSignal = AbortSignal.timeout(timeout); + internalSignal.addEventListener('abort', () => { + ffmpeg.kill('SIGTERM'); + }); + } + + if (signal) { + signal.addEventListener('abort', () => { + ffmpeg.kill('SIGTERM'); + }); + } + } catch (error) { + reject(error); + } + }); +} + +export async function recordAudio(options: RecordAudioOptions = {}) { + if (isNode) { + return nodejsRecordAudio(options); + } + + throw new Error( + 'Record audio is not supported in the browser. Check out https://npm.im/wavtools as an alternative.', + ); +} diff --git a/src/helpers/zod.ts b/src/helpers/zod.ts index 99b9eb4b0..de4c3ba93 100644 --- a/src/helpers/zod.ts +++ b/src/helpers/zod.ts @@ -2,11 +2,15 @@ import { ResponseFormatJSONSchema } from '../resources/index'; import type { infer as zodInfer, ZodType } from 'zod'; import { AutoParseableResponseFormat, + AutoParseableTextFormat, AutoParseableTool, makeParseableResponseFormat, + makeParseableTextFormat, makeParseableTool, } from '../lib/parser'; import { zodToJsonSchema as _zodToJsonSchema } from '../_vendor/zod-to-json-schema'; +import { AutoParseableResponseTool, makeParseableResponseTool } from '../lib/ResponsesParser'; +import { type ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses'; function zodToJsonSchema(schema: ZodType, options: { name: string }): Record { return _zodToJsonSchema(schema, { @@ -74,6 +78,23 @@ export function zodResponseFormat( ); } +export function zodTextFormat( + zodObject: ZodInput, + name: string, + props?: Omit, +): AutoParseableTextFormat> { + return makeParseableTextFormat( + { + type: 'json_schema', + ...props, + name, + strict: true, + schema: zodToJsonSchema(zodObject, { name }), + }, + (content) => zodObject.parse(JSON.parse(content)), + ); +} + /** * Creates a chat completion `function` tool that can be invoked * automatically by the chat completion `.runTools()` method or automatically @@ -106,3 +127,28 @@ export function zodFunction(options: { }, ); } + +export function zodResponsesFunction(options: { + name: string; + parameters: Parameters; + function?: ((args: zodInfer) => unknown | Promise) | undefined; + description?: string | undefined; +}): AutoParseableResponseTool<{ + arguments: Parameters; + name: string; + function: (args: zodInfer) => unknown; +}> { + return makeParseableResponseTool( + { + type: 'function', + name: options.name, + parameters: zodToJsonSchema(options.parameters, { name: options.name }), + strict: true, + ...(options.description ? { description: options.description } : undefined), + }, + { + callback: options.function, + parser: (args) => options.parameters.parse(JSON.parse(args)), + }, + ); +} diff --git a/src/index.ts b/src/index.ts index d3e1d2a78..537c18f43 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,12 +1,152 @@ // File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. -import * as Errors from './error'; -import * as Uploads from './uploads'; import { type Agent, type RequestInit } from './_shims/index'; import * as qs from './internal/qs'; import * as Core from './core'; +import * as Errors from './error'; import * as Pagination from './pagination'; +import { type CursorPageParams, CursorPageResponse, PageResponse } from './pagination'; +import * as Uploads from './uploads'; import * as API from './resources/index'; +import { + Batch, + BatchCreateParams, + BatchError, + BatchListParams, + BatchRequestCounts, + Batches, + BatchesPage, +} from './resources/batches'; +import { + Completion, + CompletionChoice, + CompletionCreateParams, + CompletionCreateParamsNonStreaming, + CompletionCreateParamsStreaming, + CompletionUsage, + Completions, +} from './resources/completions'; +import { + CreateEmbeddingResponse, + Embedding, + EmbeddingCreateParams, + EmbeddingModel, + Embeddings, +} from './resources/embeddings'; +import { + FileContent, + FileCreateParams, + FileDeleted, + FileListParams, + FileObject, + FileObjectsPage, + FilePurpose, + Files, +} from './resources/files'; +import { + Image, + ImageCreateVariationParams, + ImageEditParams, + ImageGenerateParams, + ImageModel, + Images, + ImagesResponse, +} from './resources/images'; +import { Model, ModelDeleted, Models, ModelsPage } from './resources/models'; +import { + Moderation, + ModerationCreateParams, + ModerationCreateResponse, + ModerationImageURLInput, + ModerationModel, + ModerationMultiModalInput, + ModerationTextInput, + Moderations, +} from './resources/moderations'; +import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; +import { Beta } from './resources/beta/beta'; +import { Chat } from './resources/chat/chat'; +import { + EvalCreateParams, + EvalCreateResponse, + EvalCustomDataSourceConfig, + EvalDeleteResponse, + EvalListParams, + EvalListResponse, + EvalListResponsesPage, + EvalRetrieveResponse, + EvalStoredCompletionsDataSourceConfig, + EvalUpdateParams, + EvalUpdateResponse, + Evals, +} from './resources/evals/evals'; +import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { Graders } from './resources/graders/graders'; +import { Responses } from './resources/responses/responses'; +import { + Upload, + UploadCompleteParams, + UploadCreateParams, + Uploads as UploadsAPIUploads, +} from './resources/uploads/uploads'; +import { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyObjectParam, + VectorStore, + VectorStoreCreateParams, + VectorStoreDeleted, + VectorStoreListParams, + VectorStoreSearchParams, + VectorStoreSearchResponse, + VectorStoreSearchResponsesPage, + VectorStoreUpdateParams, + VectorStores, + VectorStoresPage, +} from './resources/vector-stores/vector-stores'; +import { + ChatCompletion, + ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, + ChatCompletionChunk, + ChatCompletionContentPart, + ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, + ChatCompletionContentPartRefusal, + ChatCompletionContentPartText, + ChatCompletionCreateParams, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, + ChatCompletionDeleted, + ChatCompletionDeveloperMessageParam, + ChatCompletionFunctionCallOption, + ChatCompletionFunctionMessageParam, + 
ChatCompletionListParams, + ChatCompletionMessage, + ChatCompletionMessageParam, + ChatCompletionMessageToolCall, + ChatCompletionModality, + ChatCompletionNamedToolChoice, + ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, + ChatCompletionRole, + ChatCompletionStoreMessage, + ChatCompletionStreamOptions, + ChatCompletionSystemMessageParam, + ChatCompletionTokenLogprob, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + ChatCompletionToolMessageParam, + ChatCompletionUpdateParams, + ChatCompletionUserMessageParam, + ChatCompletionsPage, + CreateChatCompletionRequestMessage, +} from './resources/chat/completions/completions'; export interface ClientOptions { /** @@ -38,7 +178,7 @@ export interface ClientOptions { * Note that request timeouts are retried by default, so in a worst-case scenario you may wait * much longer than this timeout before the promise succeeds or fails. */ - timeout?: number; + timeout?: number | undefined; /** * An HTTP agent used to manage HTTP(S) connections. @@ -46,7 +186,7 @@ export interface ClientOptions { * If not provided, an agent will be constructed by default in the Node.js environment, * otherwise no agent is used. */ - httpAgent?: Agent; + httpAgent?: Agent | undefined; /** * Specify a custom `fetch` function implementation. @@ -62,7 +202,7 @@ export interface ClientOptions { * * @default 2 */ - maxRetries?: number; + maxRetries?: number | undefined; /** * Default headers to include with every request to the API. @@ -70,7 +210,7 @@ export interface ClientOptions { * These can be removed in individual requests by explicitly setting the * header to `undefined` or `null` in request options. */ - defaultHeaders?: Core.Headers; + defaultHeaders?: Core.Headers | undefined; /** * Default query parameters to include with every request to the API. @@ -78,13 +218,13 @@ export interface ClientOptions { * These can be removed in individual requests by explicitly setting the * param to `undefined` in request options. */ - defaultQuery?: Core.DefaultQuery; + defaultQuery?: Core.DefaultQuery | undefined; /** * By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers. * Only set this option to `true` if you understand the risks and have appropriate mitigations in place. 
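
Every option in this interface now spells out `| undefined`, which matters for callers compiling with `exactOptionalPropertyTypes`: possibly-absent values can be forwarded without conditional spreads. A hedged sketch with a hypothetical app-config type:

```ts
import OpenAI from 'openai';

// hypothetical config in which every field may be absent
interface AppConfig {
  timeoutMs?: number | undefined;
  maxRetries?: number | undefined;
}

function makeClient(config: AppConfig): OpenAI {
  // an explicit `undefined` now type-checks and falls back to the
  // client defaults (10-minute timeout, 2 retries)
  return new OpenAI({
    timeout: config.timeoutMs,
    maxRetries: config.maxRetries,
  });
}
```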
*/ - dangerouslyAllowBrowser?: boolean; + dangerouslyAllowBrowser?: boolean | undefined; } /** @@ -163,9 +303,13 @@ export class OpenAI extends Core.APIClient { moderations: API.Moderations = new API.Moderations(this); models: API.Models = new API.Models(this); fineTuning: API.FineTuning = new API.FineTuning(this); + graders: API.Graders = new API.Graders(this); + vectorStores: API.VectorStores = new API.VectorStores(this); beta: API.Beta = new API.Beta(this); batches: API.Batches = new API.Batches(this); uploads: API.Uploads = new API.Uploads(this); + responses: API.Responses = new API.Responses(this); + evals: API.Evals = new API.Evals(this); protected override defaultQuery(): Core.DefaultQuery | undefined { return this._options.defaultQuery; @@ -209,134 +353,212 @@ export class OpenAI extends Core.APIClient { static fileFromPath = Uploads.fileFromPath; } -export const { - OpenAIError, - APIError, - APIConnectionError, - APIConnectionTimeoutError, - APIUserAbortError, - NotFoundError, - ConflictError, - RateLimitError, - BadRequestError, - AuthenticationError, - InternalServerError, - PermissionDeniedError, - UnprocessableEntityError, -} = Errors; - -export import toFile = Uploads.toFile; -export import fileFromPath = Uploads.fileFromPath; - -export namespace OpenAI { - export import RequestOptions = Core.RequestOptions; +OpenAI.Completions = Completions; +OpenAI.Chat = Chat; +OpenAI.ChatCompletionsPage = ChatCompletionsPage; +OpenAI.Embeddings = Embeddings; +OpenAI.Files = Files; +OpenAI.FileObjectsPage = FileObjectsPage; +OpenAI.Images = Images; +OpenAI.Audio = Audio; +OpenAI.Moderations = Moderations; +OpenAI.Models = Models; +OpenAI.ModelsPage = ModelsPage; +OpenAI.FineTuning = FineTuning; +OpenAI.Graders = Graders; +OpenAI.VectorStores = VectorStores; +OpenAI.VectorStoresPage = VectorStoresPage; +OpenAI.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; +OpenAI.Beta = Beta; +OpenAI.Batches = Batches; +OpenAI.BatchesPage = BatchesPage; +OpenAI.Uploads = UploadsAPIUploads; +OpenAI.Responses = Responses; +OpenAI.Evals = Evals; +OpenAI.EvalListResponsesPage = EvalListResponsesPage; +export declare namespace OpenAI { + export type RequestOptions = Core.RequestOptions; export import Page = Pagination.Page; - export import PageResponse = Pagination.PageResponse; + export { type PageResponse as PageResponse }; export import CursorPage = Pagination.CursorPage; - export import CursorPageParams = Pagination.CursorPageParams; - export import CursorPageResponse = Pagination.CursorPageResponse; - - export import Completions = API.Completions; - export import Completion = API.Completion; - export import CompletionChoice = API.CompletionChoice; - export import CompletionUsage = API.CompletionUsage; - export import CompletionCreateParams = API.CompletionCreateParams; - export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming; - export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming; - - export import Chat = API.Chat; - export import ChatModel = API.ChatModel; - export import ChatCompletion = API.ChatCompletion; - export import ChatCompletionAssistantMessageParam = API.ChatCompletionAssistantMessageParam; - export import ChatCompletionChunk = API.ChatCompletionChunk; - export import ChatCompletionContentPart = API.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = API.ChatCompletionContentPartImage; - export import ChatCompletionContentPartRefusal = API.ChatCompletionContentPartRefusal; - export 
import ChatCompletionContentPartText = API.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = API.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = API.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = API.ChatCompletionMessage; - export import ChatCompletionMessageParam = API.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = API.ChatCompletionMessageToolCall; - export import ChatCompletionNamedToolChoice = API.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = API.ChatCompletionRole; - export import ChatCompletionStreamOptions = API.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = API.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = API.ChatCompletionTokenLogprob; - export import ChatCompletionTool = API.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = API.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = API.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = API.ChatCompletionUserMessageParam; - export import ChatCompletionCreateParams = API.ChatCompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = API.ChatCompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = API.ChatCompletionCreateParamsStreaming; - - export import Embeddings = API.Embeddings; - export import CreateEmbeddingResponse = API.CreateEmbeddingResponse; - export import Embedding = API.Embedding; - export import EmbeddingModel = API.EmbeddingModel; - export import EmbeddingCreateParams = API.EmbeddingCreateParams; - - export import Files = API.Files; - export import FileContent = API.FileContent; - export import FileDeleted = API.FileDeleted; - export import FileObject = API.FileObject; - export import FilePurpose = API.FilePurpose; - export import FileObjectsPage = API.FileObjectsPage; - export import FileCreateParams = API.FileCreateParams; - export import FileListParams = API.FileListParams; - - export import Images = API.Images; - export import Image = API.Image; - export import ImageModel = API.ImageModel; - export import ImagesResponse = API.ImagesResponse; - export import ImageCreateVariationParams = API.ImageCreateVariationParams; - export import ImageEditParams = API.ImageEditParams; - export import ImageGenerateParams = API.ImageGenerateParams; - - export import Audio = API.Audio; - export import AudioModel = API.AudioModel; - export import AudioResponseFormat = API.AudioResponseFormat; - - export import Moderations = API.Moderations; - export import Moderation = API.Moderation; - export import ModerationImageURLInput = API.ModerationImageURLInput; - export import ModerationModel = API.ModerationModel; - export import ModerationMultiModalInput = API.ModerationMultiModalInput; - export import ModerationTextInput = API.ModerationTextInput; - export import ModerationCreateResponse = API.ModerationCreateResponse; - export import ModerationCreateParams = API.ModerationCreateParams; - - export import Models = API.Models; - export import Model = API.Model; - export import ModelDeleted = API.ModelDeleted; - export import ModelsPage = API.ModelsPage; - - export import FineTuning = API.FineTuning; - - export import Beta = API.Beta; - - export import Batches = API.Batches; - export import Batch = API.Batch; - export import BatchError = API.BatchError; - export import BatchRequestCounts = 
API.BatchRequestCounts; - export import BatchesPage = API.BatchesPage; - export import BatchCreateParams = API.BatchCreateParams; - export import BatchListParams = API.BatchListParams; - - export import Uploads = API.Uploads; - export import Upload = API.Upload; - export import UploadCreateParams = API.UploadCreateParams; - export import UploadCompleteParams = API.UploadCompleteParams; - - export import ErrorObject = API.ErrorObject; - export import FunctionDefinition = API.FunctionDefinition; - export import FunctionParameters = API.FunctionParameters; - export import ResponseFormatJSONObject = API.ResponseFormatJSONObject; - export import ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; - export import ResponseFormatText = API.ResponseFormatText; + export { type CursorPageParams as CursorPageParams, type CursorPageResponse as CursorPageResponse }; + + export { + Completions as Completions, + type Completion as Completion, + type CompletionChoice as CompletionChoice, + type CompletionUsage as CompletionUsage, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; + + export { + Chat as Chat, + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, + type ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, + ChatCompletionsPage as ChatCompletionsPage, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as 
ChatCompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, + }; + + export { + Embeddings as Embeddings, + type CreateEmbeddingResponse as CreateEmbeddingResponse, + type Embedding as Embedding, + type EmbeddingModel as EmbeddingModel, + type EmbeddingCreateParams as EmbeddingCreateParams, + }; + + export { + Files as Files, + type FileContent as FileContent, + type FileDeleted as FileDeleted, + type FileObject as FileObject, + type FilePurpose as FilePurpose, + FileObjectsPage as FileObjectsPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; + + export { + Images as Images, + type Image as Image, + type ImageModel as ImageModel, + type ImagesResponse as ImagesResponse, + type ImageCreateVariationParams as ImageCreateVariationParams, + type ImageEditParams as ImageEditParams, + type ImageGenerateParams as ImageGenerateParams, + }; + + export { Audio as Audio, type AudioModel as AudioModel, type AudioResponseFormat as AudioResponseFormat }; + + export { + Moderations as Moderations, + type Moderation as Moderation, + type ModerationImageURLInput as ModerationImageURLInput, + type ModerationModel as ModerationModel, + type ModerationMultiModalInput as ModerationMultiModalInput, + type ModerationTextInput as ModerationTextInput, + type ModerationCreateResponse as ModerationCreateResponse, + type ModerationCreateParams as ModerationCreateParams, + }; + + export { + Models as Models, + type Model as Model, + type ModelDeleted as ModelDeleted, + ModelsPage as ModelsPage, + }; + + export { FineTuning as FineTuning }; + + export { Graders as Graders }; + + export { + VectorStores as VectorStores, + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreSearchResponse as VectorStoreSearchResponse, + VectorStoresPage as VectorStoresPage, + VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + type VectorStoreSearchParams as VectorStoreSearchParams, + }; + + export { Beta as Beta }; + + export { + Batches as Batches, + type Batch as Batch, + type BatchError as BatchError, + type BatchRequestCounts as BatchRequestCounts, + BatchesPage as BatchesPage, + type BatchCreateParams as BatchCreateParams, + type BatchListParams as BatchListParams, + }; + + export { + UploadsAPIUploads as Uploads, + type Upload as Upload, + type UploadCreateParams as UploadCreateParams, + type UploadCompleteParams as UploadCompleteParams, + }; + + export { Responses as Responses }; + + export { + Evals as Evals, + type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, + type EvalCreateResponse 
as EvalCreateResponse, + type EvalRetrieveResponse as EvalRetrieveResponse, + type EvalUpdateResponse as EvalUpdateResponse, + type EvalListResponse as EvalListResponse, + type EvalDeleteResponse as EvalDeleteResponse, + EvalListResponsesPage as EvalListResponsesPage, + type EvalCreateParams as EvalCreateParams, + type EvalUpdateParams as EvalUpdateParams, + type EvalListParams as EvalListParams, + }; + + export type AllModels = API.AllModels; + export type ChatModel = API.ChatModel; + export type ComparisonFilter = API.ComparisonFilter; + export type CompoundFilter = API.CompoundFilter; + export type ErrorObject = API.ErrorObject; + export type FunctionDefinition = API.FunctionDefinition; + export type FunctionParameters = API.FunctionParameters; + export type Metadata = API.Metadata; + export type Reasoning = API.Reasoning; + export type ReasoningEffort = API.ReasoningEffort; + export type ResponseFormatJSONObject = API.ResponseFormatJSONObject; + export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema; + export type ResponseFormatText = API.ResponseFormatText; + export type ResponsesModel = API.ResponsesModel; } // ---------------------- Azure ---------------------- @@ -374,7 +596,7 @@ export interface AzureClientOptions extends ClientOptions { /** API Client for interfacing with the Azure OpenAI API. */ export class AzureOpenAI extends OpenAI { private _azureADTokenProvider: (() => Promise) | undefined; - private _deployment: string | undefined; + deploymentName: string | undefined; apiVersion: string = ''; /** * API Client for interfacing with the Azure OpenAI API. @@ -457,10 +679,13 @@ export class AzureOpenAI extends OpenAI { this._azureADTokenProvider = azureADTokenProvider; this.apiVersion = apiVersion; - this._deployment = deployment; + this.deploymentName = deployment; } - override buildRequest(options: Core.FinalRequestOptions): { + override buildRequest( + options: Core.FinalRequestOptions, + props: { retryCount?: number } = {}, + ): { req: RequestInit; url: string; timeout: number; @@ -469,15 +694,15 @@ export class AzureOpenAI extends OpenAI { if (!Core.isObj(options.body)) { throw new Error('Expected request body to be an object'); } - const model = this._deployment || options.body['model']; + const model = this.deploymentName || options.body['model'] || options.__metadata?.['model']; if (model !== undefined && !this.baseURL.includes('/deployments')) { options.path = `/deployments/${model}${options.path}`; } } - return super.buildRequest(options); + return super.buildRequest(options, props); } - private async _getAzureADToken(): Promise { + async _getAzureADToken(): Promise { if (typeof this._azureADTokenProvider === 'function') { const token = await this._azureADTokenProvider(); if (!token || typeof token !== 'string') { @@ -531,4 +756,21 @@ const API_KEY_SENTINEL = ''; // ---------------------- End Azure ---------------------- +export { toFile, fileFromPath } from './uploads'; +export { + OpenAIError, + APIError, + APIConnectionError, + APIConnectionTimeoutError, + APIUserAbortError, + NotFoundError, + ConflictError, + RateLimitError, + BadRequestError, + AuthenticationError, + InternalServerError, + PermissionDeniedError, + UnprocessableEntityError, +} from './error'; + export default OpenAI; diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts index 1e0bbf390..947f240b3 100644 --- a/src/internal/decoders/line.ts +++ b/src/internal/decoders/line.ts @@ -1,6 +1,6 @@ import { OpenAIError } from '../../error'; -type Bytes = string | 
ArrayBuffer | Uint8Array | Buffer | null | undefined; +export type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; /** * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally @@ -13,52 +13,58 @@ export class LineDecoder { static NEWLINE_CHARS = new Set(['\n', '\r']); static NEWLINE_REGEXP = /\r\n|[\n\r]/g; - buffer: string[]; - trailingCR: boolean; + buffer: Uint8Array; + #carriageReturnIndex: number | null; textDecoder: any; // TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types. constructor() { - this.buffer = []; - this.trailingCR = false; + this.buffer = new Uint8Array(); + this.#carriageReturnIndex = null; } decode(chunk: Bytes): string[] { - let text = this.decodeText(chunk); - - if (this.trailingCR) { - text = '\r' + text; - this.trailingCR = false; - } - if (text.endsWith('\r')) { - this.trailingCR = true; - text = text.slice(0, -1); - } - - if (!text) { + if (chunk == null) { return []; } - const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || ''); - let lines = text.split(LineDecoder.NEWLINE_REGEXP); + const binaryChunk = + chunk instanceof ArrayBuffer ? new Uint8Array(chunk) + : typeof chunk === 'string' ? new TextEncoder().encode(chunk) + : chunk; + + let newData = new Uint8Array(this.buffer.length + binaryChunk.length); + newData.set(this.buffer); + newData.set(binaryChunk, this.buffer.length); + this.buffer = newData; + + const lines: string[] = []; + let patternIndex; + while ((patternIndex = findNewlineIndex(this.buffer, this.#carriageReturnIndex)) != null) { + if (patternIndex.carriage && this.#carriageReturnIndex == null) { + // skip until we either get a corresponding `\n`, a new `\r` or nothing + this.#carriageReturnIndex = patternIndex.index; + continue; + } - // if there is a trailing new line then the last entry will be an empty - // string which we don't care about - if (trailingNewline) { - lines.pop(); - } + // we got double \r or \rtext\n + if ( + this.#carriageReturnIndex != null && + (patternIndex.index !== this.#carriageReturnIndex + 1 || patternIndex.carriage) + ) { + lines.push(this.decodeText(this.buffer.slice(0, this.#carriageReturnIndex - 1))); + this.buffer = this.buffer.slice(this.#carriageReturnIndex); + this.#carriageReturnIndex = null; + continue; + } - if (lines.length === 1 && !trailingNewline) { - this.buffer.push(lines[0]!); - return []; - } + const endIndex = + this.#carriageReturnIndex !== null ? patternIndex.preceding - 1 : patternIndex.preceding; - if (this.buffer.length > 0) { - lines = [this.buffer.join('') + lines[0], ...lines.slice(1)]; - this.buffer = []; - } + const line = this.decodeText(this.buffer.slice(0, endIndex)); + lines.push(line); - if (!trailingNewline) { - this.buffer = [lines.pop() || '']; + this.buffer = this.buffer.slice(patternIndex.index); + this.#carriageReturnIndex = null; } return lines; @@ -102,13 +108,69 @@ export class LineDecoder { } flush(): string[] { - if (!this.buffer.length && !this.trailingCR) { + if (!this.buffer.length) { return []; } + return this.decode('\n'); + } +} - const lines = [this.buffer.join('')]; - this.buffer = []; - this.trailingCR = false; - return lines; +/** + * This function searches the buffer for the end patterns, (\r or \n) + * and returns an object with the index preceding the matched newline and the + * index after the newline char. `null` is returned if no new line is found. 
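The rewritten `LineDecoder` buffers raw bytes and only emits completed lines, which is what SSE parsing needs when network chunks can split a line anywhere, even between a `\r` and its `\n`. A minimal sketch of that behavior, assuming the internal module path below is reachable in your build (it is an internal file, not a stable public API):

```ts
import { LineDecoder } from 'openai/internal/decoders/line';

const decoder = new LineDecoder();

// Chunks may split a line anywhere, including between a `\r` and its `\n`.
const chunks = ['data: he', 'llo\r', '\ndata: wo', 'rld\n'];

const lines: string[] = [];
for (const chunk of chunks) {
  // decode() buffers partial input and returns only completed lines.
  lines.push(...decoder.decode(chunk));
}
// flush() drains any trailing text that was never newline-terminated.
lines.push(...decoder.flush());

console.log(lines); // ['data: hello', 'data: world']
```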
+ * + * ```ts + * findNewlineIndex('abc\ndef') -> { preceding: 3, index: 4 } + * ``` + */ +function findNewlineIndex( + buffer: Uint8Array, + startIndex: number | null, +): { preceding: number; index: number; carriage: boolean } | null { + const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = startIndex ?? 0; i < buffer.length; i++) { + if (buffer[i] === newline) { + return { preceding: i, index: i + 1, carriage: false }; + } + + if (buffer[i] === carriage) { + return { preceding: i, index: i + 1, carriage: true }; + } } + + return null; +} + +export function findDoubleNewlineIndex(buffer: Uint8Array): number { + // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n) + // and returns the index right after the first occurrence of any pattern, + // or -1 if none of the patterns are found. + const newline = 0x0a; // \n + const carriage = 0x0d; // \r + + for (let i = 0; i < buffer.length - 1; i++) { + if (buffer[i] === newline && buffer[i + 1] === newline) { + // \n\n + return i + 2; + } + if (buffer[i] === carriage && buffer[i + 1] === carriage) { + // \r\r + return i + 2; + } + if ( + buffer[i] === carriage && + buffer[i + 1] === newline && + i + 3 < buffer.length && + buffer[i + 2] === carriage && + buffer[i + 3] === newline + ) { + // \r\n\r\n + return i + 4; + } + } + + return -1; } diff --git a/src/internal/stream-utils.ts b/src/internal/stream-utils.ts new file mode 100644 index 000000000..37f7793cf --- /dev/null +++ b/src/internal/stream-utils.ts @@ -0,0 +1,32 @@ +/** + * Most browsers don't yet have async iterable support for ReadableStream, + * and Node has a very different way of reading bytes from its "ReadableStream". + * + * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 + */ +export function ReadableStreamToAsyncIterable(stream: any): AsyncIterableIterator { + if (stream[Symbol.asyncIterator]) return stream; + + const reader = stream.getReader(); + return { + async next() { + try { + const result = await reader.read(); + if (result?.done) reader.releaseLock(); // release lock when stream becomes closed + return result; + } catch (e) { + reader.releaseLock(); // release lock when stream becomes errored + throw e; + } + }, + async return() { + const cancelPromise = reader.cancel(); + reader.releaseLock(); + await cancelPromise; + return { done: true, value: undefined }; + }, + [Symbol.asyncIterator]() { + return this; + }, + }; +} diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts index 39ee4e993..406f5a431 100644 --- a/src/lib/AbstractChatCompletionRunner.ts +++ b/src/lib/AbstractChatCompletionRunner.ts @@ -1,13 +1,13 @@ -import * as Core from 'openai/core'; -import { type CompletionUsage } from 'openai/resources/completions'; +import * as Core from '../core'; +import { type CompletionUsage } from '../resources/completions'; import { type ChatCompletion, type ChatCompletionMessage, type ChatCompletionMessageParam, type ChatCompletionCreateParams, type ChatCompletionTool, -} from 'openai/resources/chat/completions'; -import { OpenAIError } from 'openai/error'; +} from '../resources/chat/completions'; +import { OpenAIError } from '../error'; import { type RunnableFunction, isRunnableFunctionWithParse, @@ -23,7 +23,7 @@ import { isAssistantMessage, isFunctionMessage, isToolMessage } from './chatComp import { BaseEvents, EventStream } from './EventStream'; import { ParsedChatCompletion } from
'../resources/beta/chat/completions'; import OpenAI from '../index'; -import { isAutoParsableTool, parseChatCompletion } from 'openai/lib/parser'; +import { isAutoParsableTool, parseChatCompletion } from '../lib/parser'; const DEFAULT_MAX_CHAT_COMPLETIONS = 10; export interface RunnerOptions extends Core.RequestOptions { @@ -105,7 +105,9 @@ export class AbstractChatCompletionRunner< const message = this.messages[i]; if (isAssistantMessage(message)) { const { function_call, ...rest } = message; - const ret: ChatCompletionMessage = { + + // TODO: support audio here + const ret: Omit = { ...rest, content: (message as ChatCompletionMessage).content ?? null, refusal: (message as ChatCompletionMessage).refusal ?? null, diff --git a/src/lib/AssistantStream.ts b/src/lib/AssistantStream.ts index 7c5ffb58e..9b6cc20c5 100644 --- a/src/lib/AssistantStream.ts +++ b/src/lib/AssistantStream.ts @@ -6,11 +6,11 @@ import { Text, ImageFile, TextDelta, - Messages, + MessageDelta, MessageContent, -} from 'openai/resources/beta/threads/messages'; -import * as Core from 'openai/core'; -import { RequestOptions } from 'openai/core'; +} from '../resources/beta/threads/messages'; +import * as Core from '../core'; +import { RequestOptions } from '../core'; import { Run, RunCreateParamsBase, @@ -18,20 +18,19 @@ import { Runs, RunSubmitToolOutputsParamsBase, RunSubmitToolOutputsParamsStreaming, -} from 'openai/resources/beta/threads/runs/runs'; -import { type ReadableStream } from 'openai/_shims/index'; -import { Stream } from 'openai/streaming'; -import { APIUserAbortError, OpenAIError } from 'openai/error'; +} from '../resources/beta/threads/runs/runs'; +import { type ReadableStream } from '../_shims/index'; +import { Stream } from '../streaming'; +import { APIUserAbortError, OpenAIError } from '../error'; import { AssistantStreamEvent, MessageStreamEvent, RunStepStreamEvent, RunStreamEvent, -} from 'openai/resources/beta/assistants'; -import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from 'openai/resources/beta/threads/runs/steps'; -import { ThreadCreateAndRunParamsBase, Threads } from 'openai/resources/beta/threads/threads'; +} from '../resources/beta/assistants'; +import { RunStep, RunStepDelta, ToolCall, ToolCallDelta } from '../resources/beta/threads/runs/steps'; +import { ThreadCreateAndRunParamsBase, Threads } from '../resources/beta/threads/threads'; import { BaseEvents, EventStream } from './EventStream'; -import MessageDelta = Messages.MessageDelta; export interface AssistantStreamEvents extends BaseEvents { run: (run: Run) => void; @@ -193,7 +192,7 @@ export class AssistantStream runs: Runs, params: RunSubmitToolOutputsParamsStream, options: RequestOptions | undefined, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._runToolAssistantStream(threadId, runId, runs, params, { @@ -239,7 +238,7 @@ export class AssistantStream params: ThreadCreateAndRunParamsBaseStream, thread: Threads, options?: RequestOptions, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._threadAssistantStream(params, thread, { @@ -255,7 +254,7 @@ export class AssistantStream runs: Runs, params: RunCreateParamsBaseStream, options?: RequestOptions, - ) { + ): AssistantStream { const runner = new AssistantStream(); runner._run(() => runner._runAssistantStream(threadId, runs, params, { @@ -371,6 +370,7 @@ export class AssistantStream case 'thread.run.in_progress': case 'thread.run.requires_action': case 'thread.run.completed': + case 
'thread.run.incomplete': case 'thread.run.failed': case 'thread.run.cancelling': case 'thread.run.cancelled': @@ -401,6 +401,8 @@ export class AssistantStream throw new Error( 'Encountered an error event in event processing - errors should be processed earlier', ); + default: + assertNever(event); } } @@ -773,3 +775,5 @@ export class AssistantStream return await this._createToolAssistantStream(runs, threadId, runId, params, options); } } + +function assertNever(_x: never) {} diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts index 0b962a110..9e68e6671 100644 --- a/src/lib/ChatCompletionRunner.ts +++ b/src/lib/ChatCompletionRunner.ts @@ -1,7 +1,7 @@ import { type ChatCompletionMessageParam, type ChatCompletionCreateParamsNonStreaming, -} from 'openai/resources/chat/completions'; +} from '../resources/chat/completions'; import { type RunnableFunctions, type BaseFunctionsArgs, RunnableTools } from './RunnableFunction'; import { AbstractChatCompletionRunner, @@ -9,8 +9,8 @@ import { RunnerOptions, } from './AbstractChatCompletionRunner'; import { isAssistantMessage } from './chatCompletionUtils'; -import OpenAI from 'openai/index'; -import { AutoParseableTool } from 'openai/lib/parser'; +import OpenAI from '../index'; +import { AutoParseableTool } from '../lib/parser'; export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents { content: (content: string) => void; diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts index e3661c8c1..35648c27b 100644 --- a/src/lib/ChatCompletionStream.ts +++ b/src/lib/ChatCompletionStream.ts @@ -1,10 +1,10 @@ -import * as Core from 'openai/core'; +import * as Core from '../core'; import { OpenAIError, APIUserAbortError, LengthFinishReasonError, ContentFilterFinishReasonError, -} from 'openai/error'; +} from '../error'; import { ChatCompletionTokenLogprob, type ChatCompletion, @@ -12,15 +12,16 @@ import { type ChatCompletionCreateParams, type ChatCompletionCreateParamsStreaming, type ChatCompletionCreateParamsBase, -} from 'openai/resources/chat/completions'; + type ChatCompletionRole, +} from '../resources/chat/completions/completions'; import { AbstractChatCompletionRunner, type AbstractChatCompletionRunnerEvents, } from './AbstractChatCompletionRunner'; -import { type ReadableStream } from 'openai/_shims/index'; -import { Stream } from 'openai/streaming'; -import OpenAI from 'openai/index'; -import { ParsedChatCompletion } from 'openai/resources/beta/chat/completions'; +import { type ReadableStream } from '../_shims/index'; +import { Stream } from '../streaming'; +import OpenAI from '../index'; +import { ParsedChatCompletion } from '../resources/beta/chat/completions'; import { AutoParseableResponseFormat, hasAutoParseableInput, @@ -28,7 +29,7 @@ import { isAutoParsableTool, maybeParseChatCompletion, shouldParseToolCall, -} from 'openai/lib/parser'; +} from '../lib/parser'; import { partialParse } from '../_vendor/partial-json-parser/parser'; export interface ContentDeltaEvent { @@ -797,7 +798,7 @@ export namespace ChatCompletionSnapshot { /** * The role of the author of this message. 
*/ - role?: 'system' | 'user' | 'assistant' | 'function' | 'tool'; + role?: ChatCompletionRole; } export namespace Message { diff --git a/src/lib/ChatCompletionStreamingRunner.ts b/src/lib/ChatCompletionStreamingRunner.ts index ea6c74116..ba0c6496f 100644 --- a/src/lib/ChatCompletionStreamingRunner.ts +++ b/src/lib/ChatCompletionStreamingRunner.ts @@ -1,13 +1,13 @@ import { type ChatCompletionChunk, type ChatCompletionCreateParamsStreaming, -} from 'openai/resources/chat/completions'; +} from '../resources/chat/completions'; import { RunnerOptions, type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner'; -import { type ReadableStream } from 'openai/_shims/index'; +import { type ReadableStream } from '../_shims/index'; import { RunnableTools, type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction'; import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream'; -import OpenAI from 'openai/index'; -import { AutoParseableTool } from 'openai/lib/parser'; +import OpenAI from '../index'; +import { AutoParseableTool } from '../lib/parser'; export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents { content: (contentDelta: string, contentSnapshot: string) => void; diff --git a/src/lib/EventEmitter.ts b/src/lib/EventEmitter.ts new file mode 100644 index 000000000..9adeebdc3 --- /dev/null +++ b/src/lib/EventEmitter.ts @@ -0,0 +1,98 @@ +type EventListener = Events[EventType]; + +type EventListeners = Array<{ + listener: EventListener; + once?: boolean; +}>; + +export type EventParameters = { + [Event in EventType]: EventListener extends (...args: infer P) => any ? P : never; +}[EventType]; + +export class EventEmitter any>> { + #listeners: { + [Event in keyof EventTypes]?: EventListeners; + } = {}; + + /** + * Adds the listener function to the end of the listeners array for the event. + * No checks are made to see if the listener has already been added. Multiple calls passing + * the same combination of event and listener will result in the listener being added, and + * called, multiple times. + * @returns this, so that calls can be chained + */ + on(event: Event, listener: EventListener): this { + const listeners: EventListeners = + this.#listeners[event] || (this.#listeners[event] = []); + listeners.push({ listener }); + return this; + } + + /** + * Removes the specified listener from the listener array for the event. + * off() will remove, at most, one instance of a listener from the listener array. If any single + * listener has been added multiple times to the listener array for the specified event, then + * off() must be called multiple times to remove each instance. + * @returns this, so that calls can be chained + */ + off(event: Event, listener: EventListener): this { + const listeners = this.#listeners[event]; + if (!listeners) return this; + const index = listeners.findIndex((l) => l.listener === listener); + if (index >= 0) listeners.splice(index, 1); + return this; + } + + /** + * Adds a one-time listener function for the event. The next time the event is triggered, + * this listener is removed and then invoked. 
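`_emit` is `protected`, so consumers never emit directly; they subclass `EventEmitter` the way the SDK's stream helpers do. A small sketch with a hypothetical `Counter` subclass (the class and the deep import path are illustrative assumptions, not SDK code):

```ts
import { EventEmitter } from 'openai/lib/EventEmitter';

// Hypothetical subclass; the SDK's stream helpers extend EventEmitter similarly.
class Counter extends EventEmitter<{ tick: (n: number) => void }> {
  tick(n: number) {
    this._emit('tick', n); // protected, so emission stays inside the subclass
  }
}

const counter = new Counter();
counter.on('tick', (n) => console.log('every tick:', n));
counter.once('tick', (n) => console.log('first tick only:', n));

// emitted() turns the next occurrence of an event into a Promise.
const firstTick = counter.emitted('tick');
counter.tick(1);
counter.tick(2);
console.log(await firstTick); // 1 (run inside an async context)
```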
+ * @returns this, so that calls can be chained + */ + once(event: Event, listener: EventListener): this { + const listeners: EventListeners = + this.#listeners[event] || (this.#listeners[event] = []); + listeners.push({ listener, once: true }); + return this; + } + + /** + * This is similar to `.once()`, but returns a Promise that resolves the next time + * the event is triggered, instead of calling a listener callback. + * @returns a Promise that resolves the next time given event is triggered, + * or rejects if an error is emitted. (If you request the 'error' event, + * returns a promise that resolves with the error). + * + * Example: + * + * const message = await stream.emitted('message') // rejects if the stream errors + */ + emitted( + event: Event, + ): Promise< + EventParameters extends [infer Param] ? Param + : EventParameters extends [] ? void + : EventParameters + > { + return new Promise((resolve, reject) => { + // TODO: handle errors + this.once(event, resolve as any); + }); + } + + protected _emit( + this: EventEmitter, + event: Event, + ...args: EventParameters + ) { + const listeners: EventListeners | undefined = this.#listeners[event]; + if (listeners) { + this.#listeners[event] = listeners.filter((l) => !l.once) as any; + listeners.forEach(({ listener }: any) => listener(...(args as any))); + } + } + + protected _hasListener(event: keyof EventTypes): boolean { + const listeners = this.#listeners[event]; + return listeners && listeners.length > 0; + } +} diff --git a/src/lib/EventStream.ts b/src/lib/EventStream.ts index a18c771dd..d3f485e9d 100644 --- a/src/lib/EventStream.ts +++ b/src/lib/EventStream.ts @@ -1,4 +1,4 @@ -import { APIUserAbortError, OpenAIError } from 'openai/error'; +import { APIUserAbortError, OpenAIError } from '../error'; export class EventStream { controller: AbortController = new AbortController(); diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts new file mode 100644 index 000000000..c64c6ffa0 --- /dev/null +++ b/src/lib/ResponsesParser.ts @@ -0,0 +1,262 @@ +import { OpenAIError } from '../error'; +import type { ChatCompletionTool } from '../resources/chat/completions'; +import { + type FunctionTool, + type ParsedContent, + type ParsedResponse, + type ParsedResponseFunctionToolCall, + type ParsedResponseOutputItem, + type Response, + type ResponseCreateParamsBase, + type ResponseCreateParamsNonStreaming, + type ResponseFunctionToolCall, + type Tool, +} from '../resources/responses/responses'; +import { type AutoParseableTextFormat, isAutoParsableResponseFormat } from '../lib/parser'; + +export type ParseableToolsParams = Array | ChatCompletionTool | null; + +export type ResponseCreateParamsWithTools = ResponseCreateParamsBase & { + tools?: ParseableToolsParams; +}; + +export type ExtractParsedContentFromParams = + NonNullable['format'] extends AutoParseableTextFormat ? P : null; + +export function maybeParseResponse< + Params extends ResponseCreateParamsBase | null, + ParsedT = Params extends null ? 
null : ExtractParsedContentFromParams>, +>(response: Response, params: Params): ParsedResponse { + if (!params || !hasAutoParseableInput(params)) { + return { + ...response, + output_parsed: null, + output: response.output.map((item) => { + if (item.type === 'function_call') { + return { + ...item, + parsed_arguments: null, + }; + } + + if (item.type === 'message') { + return { + ...item, + content: item.content.map((content) => ({ + ...content, + parsed: null, + })), + }; + } else { + return item; + } + }), + }; + } + + return parseResponse(response, params); +} + +export function parseResponse< + Params extends ResponseCreateParamsBase, + ParsedT = ExtractParsedContentFromParams, +>(response: Response, params: Params): ParsedResponse { + const output: Array> = response.output.map( + (item): ParsedResponseOutputItem => { + if (item.type === 'function_call') { + return { + ...item, + parsed_arguments: parseToolCall(params, item), + }; + } + if (item.type === 'message') { + const content: Array> = item.content.map((content) => { + if (content.type === 'output_text') { + return { + ...content, + parsed: parseTextFormat(params, content.text), + }; + } + + return content; + }); + + return { + ...item, + content, + }; + } + + return item; + }, + ); + + const parsed: Omit, 'output_parsed'> = Object.assign({}, response, { output }); + if (!Object.getOwnPropertyDescriptor(response, 'output_text')) { + addOutputText(parsed); + } + + Object.defineProperty(parsed, 'output_parsed', { + enumerable: true, + get() { + for (const output of parsed.output) { + if (output.type !== 'message') { + continue; + } + + for (const content of output.content) { + if (content.type === 'output_text' && content.parsed !== null) { + return content.parsed; + } + } + } + + return null; + }, + }); + + return parsed as ParsedResponse; +} + +function parseTextFormat< + Params extends ResponseCreateParamsBase, + ParsedT = ExtractParsedContentFromParams, +>(params: Params, content: string): ParsedT | null { + if (params.text?.format?.type !== 'json_schema') { + return null; + } + + if ('$parseRaw' in params.text?.format) { + const text_format = params.text?.format as unknown as AutoParseableTextFormat; + return text_format.$parseRaw(content); + } + + return JSON.parse(content); +} + +export function hasAutoParseableInput(params: ResponseCreateParamsWithTools): boolean { + if (isAutoParsableResponseFormat(params.text?.format)) { + return true; + } + + return false; +} + +type ToolOptions = { + name: string; + arguments: any; + function?: ((args: any) => any) | undefined; +}; + +export type AutoParseableResponseTool< + OptionsT extends ToolOptions, + HasFunction = OptionsT['function'] extends Function ? 
true : false, +> = FunctionTool & { + __arguments: OptionsT['arguments']; // type-level only + __name: OptionsT['name']; // type-level only + + $brand: 'auto-parseable-tool'; + $callback: ((args: OptionsT['arguments']) => any) | undefined; + $parseRaw(args: string): OptionsT['arguments']; +}; + +export function makeParseableResponseTool( + tool: FunctionTool, + { + parser, + callback, + }: { + parser: (content: string) => OptionsT['arguments']; + callback: ((args: any) => any) | undefined; + }, +): AutoParseableResponseTool { + const obj = { ...tool }; + + Object.defineProperties(obj, { + $brand: { + value: 'auto-parseable-tool', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + $callback: { + value: callback, + enumerable: false, + }, + }); + + return obj as AutoParseableResponseTool; +} + +export function isAutoParsableTool(tool: any): tool is AutoParseableResponseTool { + return tool?.['$brand'] === 'auto-parseable-tool'; +} + +function getInputToolByName(input_tools: Array, name: string): FunctionTool | undefined { + return input_tools.find((tool) => tool.type === 'function' && tool.name === name) as + | FunctionTool + | undefined; +} + +function parseToolCall( + params: Params, + toolCall: ResponseFunctionToolCall, +): ParsedResponseFunctionToolCall { + const inputTool = getInputToolByName(params.tools ?? [], toolCall.name); + + return { + ...toolCall, + parsed_arguments: + isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCall.arguments) + : inputTool?.strict ? JSON.parse(toolCall.arguments) + : null, + }; +} + +export function shouldParseToolCall( + params: ResponseCreateParamsNonStreaming | null | undefined, + toolCall: ResponseFunctionToolCall, +): boolean { + if (!params) { + return false; + } + + const inputTool = getInputToolByName(params.tools ?? [], toolCall.name); + return isAutoParsableTool(inputTool) || inputTool?.strict || false; +} + +export function validateInputTools(tools: ChatCompletionTool[] | undefined) { + for (const tool of tools ?? []) { + if (tool.type !== 'function') { + throw new OpenAIError( + `Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``, + ); + } + + if (tool.function.strict !== true) { + throw new OpenAIError( + `The \`${tool.function.name}\` tool is not marked with \`strict: true\`.
Only strict function tools can be auto-parsed`, + ); + } + } +} + +export function addOutputText(rsp: Response): void { + const texts: string[] = []; + for (const output of rsp.output) { + if (output.type !== 'message') { + continue; + } + + for (const content of output.content) { + if (content.type === 'output_text') { + texts.push(content.text); + } + } + } + + rsp.output_text = texts.join(''); +} diff --git a/src/lib/chatCompletionUtils.ts b/src/lib/chatCompletionUtils.ts index a0d9099de..7e9f8a093 100644 --- a/src/lib/chatCompletionUtils.ts +++ b/src/lib/chatCompletionUtils.ts @@ -3,7 +3,7 @@ import { type ChatCompletionFunctionMessageParam, type ChatCompletionMessageParam, type ChatCompletionToolMessageParam, -} from 'openai/resources'; +} from '../resources'; export const isAssistantMessage = ( message: ChatCompletionMessageParam | null | undefined, diff --git a/src/lib/parser.ts b/src/lib/parser.ts index 8bf2a3a36..d75d32a40 100644 --- a/src/lib/parser.ts +++ b/src/lib/parser.ts @@ -13,7 +13,8 @@ import { ParsedFunctionToolCall, } from '../resources/beta/chat/completions'; import { ResponseFormatJSONSchema } from '../resources/shared'; -import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from 'openai/error'; +import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error'; +import { type ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses'; type AnyChatCompletionCreateParams = | ChatCompletionCreateParams @@ -51,6 +52,33 @@ export function makeParseableResponseFormat( return obj as AutoParseableResponseFormat; } +export type AutoParseableTextFormat = ResponseFormatTextJSONSchemaConfig & { + __output: ParsedT; // type-level only + + $brand: 'auto-parseable-response-format'; + $parseRaw(content: string): ParsedT; +}; + +export function makeParseableTextFormat( + response_format: ResponseFormatTextJSONSchemaConfig, + parser: (content: string) => ParsedT, +): AutoParseableTextFormat { + const obj = { ...response_format }; + + Object.defineProperties(obj, { + $brand: { + value: 'auto-parseable-response-format', + enumerable: false, + }, + $parseRaw: { + value: parser, + enumerable: false, + }, + }); + + return obj as AutoParseableTextFormat; +} + export function isAutoParsableResponseFormat( response_format: any, ): response_format is AutoParseableResponseFormat { @@ -119,7 +147,15 @@ export function maybeParseChatCompletion< ...completion, choices: completion.choices.map((choice) => ({ ...choice, - message: { ...choice.message, parsed: null, tool_calls: choice.message.tool_calls ?? [] }, + message: { + ...choice.message, + parsed: null, + ...(choice.message.tool_calls ? + { + tool_calls: choice.message.tool_calls, + } + : undefined), + }, })), }; } @@ -144,7 +180,12 @@ export function parseChatCompletion< ...choice, message: { ...choice.message, - tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall(params, toolCall)) ?? [], + ...(choice.message.tool_calls ? + { + tool_calls: + choice.message.tool_calls?.map((toolCall) => parseToolCall(params, toolCall)) ?? undefined, + } + : undefined), parsed: choice.message.content && !choice.message.refusal ? 
parseResponseFormat(params, choice.message.content) diff --git a/src/lib/responses/EventTypes.ts b/src/lib/responses/EventTypes.ts new file mode 100644 index 000000000..fc1620988 --- /dev/null +++ b/src/lib/responses/EventTypes.ts @@ -0,0 +1,76 @@ +import { + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCodeInterpreterCallCodeDeltaEvent, + ResponseCodeInterpreterCallCodeDoneEvent, + ResponseCodeInterpreterCallCompletedEvent, + ResponseCodeInterpreterCallInProgressEvent, + ResponseCodeInterpreterCallInterpretingEvent, + ResponseCompletedEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreatedEvent, + ResponseErrorEvent, + ResponseFailedEvent, + ResponseFileSearchCallCompletedEvent, + ResponseFileSearchCallInProgressEvent, + ResponseFileSearchCallSearchingEvent, + ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseInProgressEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseRefusalDeltaEvent, + ResponseRefusalDoneEvent, + ResponseTextAnnotationDeltaEvent, + ResponseTextDeltaEvent as RawResponseTextDeltaEvent, + ResponseTextDoneEvent, + ResponseIncompleteEvent, + ResponseWebSearchCallCompletedEvent, + ResponseWebSearchCallInProgressEvent, + ResponseWebSearchCallSearchingEvent, +} from '../../resources/responses/responses'; + +export type ResponseFunctionCallArgumentsDeltaEvent = RawResponseFunctionCallArgumentsDeltaEvent & { + snapshot: string; +}; + +export type ResponseTextDeltaEvent = RawResponseTextDeltaEvent & { + snapshot: string; +}; + +export type ParsedResponseStreamEvent = + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; diff --git a/src/lib/responses/ResponseStream.ts b/src/lib/responses/ResponseStream.ts new file mode 100644 index 000000000..d2ee80a75 --- /dev/null +++ b/src/lib/responses/ResponseStream.ts @@ -0,0 +1,298 @@ +import { + type ParsedResponse, + type Response, + type ResponseCreateParamsBase, + type ResponseCreateParamsStreaming, + type ResponseStreamEvent, +} from '../../resources/responses/responses'; +import * as Core from '../../core'; +import { APIUserAbortError, OpenAIError } from '../../error'; +import OpenAI from '../../index'; +import { type BaseEvents, EventStream } from '../EventStream'; +import { type ResponseFunctionCallArgumentsDeltaEvent, type 
ResponseTextDeltaEvent } from './EventTypes'; +import { maybeParseResponse } from '../ResponsesParser'; + +export type ResponseStreamParams = Omit & { + stream?: true; +}; + +type ResponseEvents = BaseEvents & + Omit< + { + [K in ResponseStreamEvent['type']]: (event: Extract) => void; + }, + 'response.output_text.delta' | 'response.function_call_arguments.delta' + > & { + event: (event: ResponseStreamEvent) => void; + 'response.output_text.delta': (event: ResponseTextDeltaEvent) => void; + 'response.function_call_arguments.delta': (event: ResponseFunctionCallArgumentsDeltaEvent) => void; + }; + +export type ResponseStreamingParams = Omit & { + stream?: true; +}; + +export class ResponseStream + extends EventStream + implements AsyncIterable +{ + #params: ResponseStreamingParams | null; + #currentResponseSnapshot: Response | undefined; + #finalResponse: ParsedResponse | undefined; + + constructor(params: ResponseStreamingParams | null) { + super(); + this.#params = params; + } + + static createResponse( + client: OpenAI, + params: ResponseStreamParams, + options?: Core.RequestOptions, + ): ResponseStream { + const runner = new ResponseStream(params as ResponseCreateParamsStreaming); + runner._run(() => + runner._createResponse(client, params, { + ...options, + headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' }, + }), + ); + return runner; + } + + #beginRequest() { + if (this.ended) return; + this.#currentResponseSnapshot = undefined; + } + + #addEvent(this: ResponseStream, event: ResponseStreamEvent) { + if (this.ended) return; + + const response = this.#accumulateResponse(event); + this._emit('event', event); + + switch (event.type) { + case 'response.output_text.delta': { + const output = response.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + const content = output.content[event.content_index]; + if (!content) { + throw new OpenAIError(`missing content at index ${event.content_index}`); + } + if (content.type !== 'output_text') { + throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`); + } + + this._emit('response.output_text.delta', { + ...event, + snapshot: content.text, + }); + } + break; + } + case 'response.function_call_arguments.delta': { + const output = response.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'function_call') { + this._emit('response.function_call_arguments.delta', { + ...event, + snapshot: output.arguments, + }); + } + break; + } + default: + // @ts-ignore + this._emit(event.type, event); + break; + } + } + + #endRequest(): ParsedResponse { + if (this.ended) { + throw new OpenAIError(`stream has ended, this shouldn't happen`); + } + const snapshot = this.#currentResponseSnapshot; + if (!snapshot) { + throw new OpenAIError(`request ended without sending any events`); + } + this.#currentResponseSnapshot = undefined; + const parsedResponse = finalizeResponse(snapshot, this.#params); + this.#finalResponse = parsedResponse; + + return parsedResponse; + } + + protected async _createResponse( + client: OpenAI, + params: ResponseStreamingParams, + options?: Core.RequestOptions, + ): Promise> { + const signal = options?.signal; + if (signal) { + if (signal.aborted) this.controller.abort(); + signal.addEventListener('abort', () => this.controller.abort()); + } + this.#beginRequest(); + + const stream = await 
client.responses.create( + { ...params, stream: true }, + { ...options, signal: this.controller.signal }, + ); + this._connected(); + for await (const event of stream) { + this.#addEvent(event); + } + if (stream.controller.signal?.aborted) { + throw new APIUserAbortError(); + } + return this.#endRequest(); + } + + #accumulateResponse(event: ResponseStreamEvent): Response { + let snapshot = this.#currentResponseSnapshot; + if (!snapshot) { + if (event.type !== 'response.created') { + throw new OpenAIError( + `When snapshot hasn't been set yet, expected 'response.created' event, got ${event.type}`, + ); + } + snapshot = this.#currentResponseSnapshot = event.response; + return snapshot; + } + + switch (event.type) { + case 'response.output_item.added': { + snapshot.output.push(event.item); + break; + } + case 'response.content_part.added': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + output.content.push(event.part); + } + break; + } + case 'response.output_text.delta': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'message') { + const content = output.content[event.content_index]; + if (!content) { + throw new OpenAIError(`missing content at index ${event.content_index}`); + } + if (content.type !== 'output_text') { + throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`); + } + content.text += event.delta; + } + break; + } + case 'response.function_call_arguments.delta': { + const output = snapshot.output[event.output_index]; + if (!output) { + throw new OpenAIError(`missing output at index ${event.output_index}`); + } + if (output.type === 'function_call') { + output.arguments += event.delta; + } + break; + } + case 'response.completed': { + this.#currentResponseSnapshot = event.response; + break; + } + } + + return snapshot; + } + + [Symbol.asyncIterator](this: ResponseStream): AsyncIterator { + const pushQueue: ResponseStreamEvent[] = []; + const readQueue: { + resolve: (event: ResponseStreamEvent | undefined) => void; + reject: (err: unknown) => void; + }[] = []; + let done = false; + + this.on('event', (event) => { + const reader = readQueue.shift(); + if (reader) { + reader.resolve(event); + } else { + pushQueue.push(event); + } + }); + + this.on('end', () => { + done = true; + for (const reader of readQueue) { + reader.resolve(undefined); + } + readQueue.length = 0; + }); + + this.on('abort', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + this.on('error', (err) => { + done = true; + for (const reader of readQueue) { + reader.reject(err); + } + readQueue.length = 0; + }); + + return { + next: async (): Promise> => { + if (!pushQueue.length) { + if (done) { + return { value: undefined, done: true }; + } + return new Promise((resolve, reject) => + readQueue.push({ resolve, reject }), + ).then((event) => (event ? { value: event, done: false } : { value: undefined, done: true })); + } + const event = pushQueue.shift()!; + return { value: event, done: false }; + }, + return: async () => { + this.abort(); + return { value: undefined, done: true }; + }, + }; + } + + /** + * @returns a promise that resolves with the final Response, or rejects + * if an error occurred or the stream ended prematurely without producing a Response.
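The queue-based async iterator above and the snapshot-carrying delta events give two complementary ways to consume a `ResponseStream`: typed listeners or `for await`. A sketch using the `client.responses.stream(...)` helper that fronts `ResponseStream.createResponse` (model name illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const stream = client.responses.stream({
  model: 'gpt-4o', // illustrative model name
  input: 'Write a haiku about streaming.',
});

// Listener surface: the delta event is re-emitted with an accumulated snapshot.
stream.on('response.output_text.delta', (event) => {
  process.stdout.write(event.delta); // event.snapshot holds the full text so far
});

// The stream is also an async iterable of every raw ResponseStreamEvent:
//   for await (const event of stream) { ... }

const final = await stream.finalResponse();
console.log('\n---\n' + final.output_text);
```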
+ */ + async finalResponse(): Promise> { + await this.done(); + const response = this.#finalResponse; + if (!response) throw new OpenAIError('stream ended without producing a Response'); + return response; + } +} + +function finalizeResponse( + snapshot: Response, + params: ResponseStreamingParams | null, +): ParsedResponse { + return maybeParseResponse(snapshot, params); +} diff --git a/src/pagination.ts b/src/pagination.ts index 63644e333..7a513fc44 100644 --- a/src/pagination.ts +++ b/src/pagination.ts @@ -43,6 +43,8 @@ export class Page extends AbstractPage implements PageResponse export interface CursorPageResponse { data: Array; + + has_more: boolean; } export interface CursorPageParams { @@ -57,6 +59,8 @@ export class CursorPage { data: Array; + has_more: boolean; + constructor( client: APIClient, response: Response, @@ -66,12 +70,21 @@ export class CursorPage super(client, response, body, options); this.data = body.data || []; + this.has_more = body.has_more || false; } getPaginatedItems(): Item[] { return this.data ?? []; } + override hasNextPage(): boolean { + if (this.has_more === false) { + return false; + } + + return super.hasNextPage(); + } + // @deprecated Please use `nextPageInfo()` instead nextPageParams(): Partial | null { const info = this.nextPageInfo(); diff --git a/src/resources.ts b/src/resources.ts new file mode 100644 index 000000000..b283d5781 --- /dev/null +++ b/src/resources.ts @@ -0,0 +1 @@ +export * from './resources/index'; diff --git a/src/resources/audio/audio.ts b/src/resources/audio/audio.ts index e06e28094..071fe5929 100644 --- a/src/resources/audio/audio.ts +++ b/src/resources/audio/audio.ts @@ -1,10 +1,32 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. import { APIResource } from '../../resource'; -import * as AudioAPI from './audio'; import * as SpeechAPI from './speech'; +import { Speech, SpeechCreateParams, SpeechModel } from './speech'; import * as TranscriptionsAPI from './transcriptions'; +import { + Transcription, + TranscriptionCreateParams, + TranscriptionCreateParamsNonStreaming, + TranscriptionCreateParamsStreaming, + TranscriptionCreateResponse, + TranscriptionInclude, + TranscriptionSegment, + TranscriptionStreamEvent, + TranscriptionTextDeltaEvent, + TranscriptionTextDoneEvent, + TranscriptionVerbose, + TranscriptionWord, + Transcriptions, +} from './transcriptions'; import * as TranslationsAPI from './translations'; +import { + Translation, + TranslationCreateParams, + TranslationCreateResponse, + TranslationVerbose, + Translations, +} from './translations'; export class Audio extends APIResource { transcriptions: TranscriptionsAPI.Transcriptions = new TranscriptionsAPI.Transcriptions(this._client); @@ -12,38 +34,45 @@ export class Audio extends APIResource { speech: SpeechAPI.Speech = new SpeechAPI.Speech(this._client); } -export type AudioModel = 'whisper-1'; +export type AudioModel = 'whisper-1' | 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe'; /** * The format of the output, in one of these options: `json`, `text`, `srt`, - * `verbose_json`, or `vtt`. + * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + * the only supported format is `json`.
*/ export type AudioResponseFormat = 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; -export namespace Audio { - export import AudioModel = AudioAPI.AudioModel; - export import AudioResponseFormat = AudioAPI.AudioResponseFormat; - export import Transcriptions = TranscriptionsAPI.Transcriptions; - export import Transcription = TranscriptionsAPI.Transcription; - export import TranscriptionSegment = TranscriptionsAPI.TranscriptionSegment; - export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; - export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; - export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; - export type TranscriptionCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranscriptionsAPI.TranscriptionCreateParams; - export import Translations = TranslationsAPI.Translations; - export import Translation = TranslationsAPI.Translation; - export import TranslationVerbose = TranslationsAPI.TranslationVerbose; - export import TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse; - export type TranslationCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranslationsAPI.TranslationCreateParams; - export import Speech = SpeechAPI.Speech; - export import SpeechModel = SpeechAPI.SpeechModel; - export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; +Audio.Transcriptions = Transcriptions; +Audio.Translations = Translations; +Audio.Speech = Speech; + +export declare namespace Audio { + export { type AudioModel as AudioModel, type AudioResponseFormat as AudioResponseFormat }; + + export { + Transcriptions as Transcriptions, + type Transcription as Transcription, + type TranscriptionInclude as TranscriptionInclude, + type TranscriptionSegment as TranscriptionSegment, + type TranscriptionStreamEvent as TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent, + type TranscriptionVerbose as TranscriptionVerbose, + type TranscriptionWord as TranscriptionWord, + type TranscriptionCreateResponse as TranscriptionCreateResponse, + type TranscriptionCreateParams as TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, + }; + + export { + Translations as Translations, + type Translation as Translation, + type TranslationVerbose as TranslationVerbose, + type TranslationCreateResponse as TranslationCreateResponse, + type TranslationCreateParams as TranslationCreateParams, + }; + + export { Speech as Speech, type SpeechModel as SpeechModel, type SpeechCreateParams as SpeechCreateParams }; } diff --git a/src/resources/audio/index.ts b/src/resources/audio/index.ts index 952c05b03..deed39ede 100644 --- a/src/resources/audio/index.ts +++ b/src/resources/audio/index.ts @@ -1,20 +1,26 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
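One practical consequence of the format comment above: `verbose_json` (and with it `timestamp_granularities`) stays a `whisper-1` feature, while `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` only return `json`. A sketch of both calls (file path illustrative):

```ts
import fs from 'fs';
import OpenAI from 'openai';

const client = new OpenAI();

// whisper-1 supports the full set of formats, e.g. verbose_json with
// segment-level timestamps.
const verbose = await client.audio.transcriptions.create({
  file: fs.createReadStream('speech.mp3'), // illustrative path
  model: 'whisper-1',
  response_format: 'verbose_json',
  timestamp_granularities: ['segment'],
});
console.log(verbose.duration, verbose.segments?.length);

// The gpt-4o transcription models only support `json` (the default).
const basic = await client.audio.transcriptions.create({
  file: fs.createReadStream('speech.mp3'),
  model: 'gpt-4o-transcribe',
});
console.log(basic.text);
```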
-export { AudioModel, AudioResponseFormat, Audio } from './audio'; -export { SpeechModel, SpeechCreateParams, Speech } from './speech'; +export { Audio, type AudioModel, type AudioResponseFormat } from './audio'; +export { Speech, type SpeechModel, type SpeechCreateParams } from './speech'; export { - Transcription, - TranscriptionSegment, - TranscriptionVerbose, - TranscriptionWord, - TranscriptionCreateResponse, - TranscriptionCreateParams, Transcriptions, + type Transcription, + type TranscriptionInclude, + type TranscriptionSegment, + type TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent, + type TranscriptionVerbose, + type TranscriptionWord, + type TranscriptionCreateResponse, + type TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming, } from './transcriptions'; export { - Translation, - TranslationVerbose, - TranslationCreateResponse, - TranslationCreateParams, Translations, + type Translation, + type TranslationVerbose, + type TranslationCreateResponse, + type TranslationCreateParams, } from './translations'; diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts index 34fb26b02..ccd37c092 100644 --- a/src/resources/audio/speech.ts +++ b/src/resources/audio/speech.ts @@ -2,19 +2,35 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as SpeechAPI from './speech'; import { type Response } from '../../_shims/index'; export class Speech extends APIResource { /** * Generates audio from the input text. + * + * @example + * ```ts + * const speech = await client.audio.speech.create({ + * input: 'input', + * model: 'string', + * voice: 'ash', + * }); + * + * const content = await speech.blob(); + * console.log(content); + * ``` */ create(body: SpeechCreateParams, options?: Core.RequestOptions): Core.APIPromise { - return this._client.post('/audio/speech', { body, ...options, __binaryResponse: true }); + return this._client.post('/audio/speech', { + body, + ...options, + headers: { Accept: 'application/octet-stream', ...options?.headers }, + __binaryResponse: true, + }); } } -export type SpeechModel = 'tts-1' | 'tts-1-hd'; +export type SpeechModel = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts'; export interface SpeechCreateParams { /** @@ -23,18 +39,36 @@ export interface SpeechCreateParams { input: string; /** - * One of the available [TTS models](https://platform.openai.com/docs/models/tts): - * `tts-1` or `tts-1-hd` + * One of the available [TTS models](https://platform.openai.com/docs/models#tts): + * `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. */ model: (string & {}) | SpeechModel; /** - * The voice to use when generating the audio. Supported voices are `alloy`, - * `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are - * available in the - * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). + * The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and + * `verse`. Previews of the voices are available in the + * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). 
*/ - voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer'; + voice: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; + + /** + * Control the voice of your generated audio with additional instructions. Does not + * work with `tts-1` or `tts-1-hd`. + */ + instructions?: string; /** * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, @@ -44,12 +78,11 @@ export interface SpeechCreateParams { /** * The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - * the default. + * the default. Does not work with `gpt-4o-mini-tts`. */ speed?: number; } -export namespace Speech { - export import SpeechModel = SpeechAPI.SpeechModel; - export import SpeechCreateParams = SpeechAPI.SpeechCreateParams; +export declare namespace Speech { + export { type SpeechModel as SpeechModel, type SpeechCreateParams as SpeechCreateParams }; } diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts index 902dc9e5f..9e5310874 100644 --- a/src/resources/audio/transcriptions.ts +++ b/src/resources/audio/transcriptions.ts @@ -4,29 +4,58 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; import * as TranscriptionsAPI from './transcriptions'; import * as AudioAPI from './audio'; +import { Stream } from '../../streaming'; export class Transcriptions extends APIResource { /** * Transcribes audio into the input language. + * + * @example + * ```ts + * const transcription = + * await client.audio.transcriptions.create({ + * file: fs.createReadStream('speech.mp3'), + * model: 'gpt-4o-transcribe', + * }); + * ``` */ create( - body: TranscriptionCreateParams<'json' | undefined>, + body: TranscriptionCreateParamsNonStreaming<'json' | undefined>, options?: Core.RequestOptions, ): Core.APIPromise; create( - body: TranscriptionCreateParams<'verbose_json'>, + body: TranscriptionCreateParamsNonStreaming<'verbose_json'>, options?: Core.RequestOptions, ): Core.APIPromise; create( - body: TranscriptionCreateParams<'srt' | 'vtt' | 'text'>, + body: TranscriptionCreateParamsNonStreaming<'srt' | 'vtt' | 'text'>, options?: Core.RequestOptions, ): Core.APIPromise; - create(body: TranscriptionCreateParams, options?: Core.RequestOptions): Core.APIPromise; + create( + body: TranscriptionCreateParamsNonStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise; + create( + body: TranscriptionCreateParamsStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise>; + create( + body: TranscriptionCreateParamsStreaming, + options?: Core.RequestOptions, + ): Core.APIPromise>; create( body: TranscriptionCreateParams, options?: Core.RequestOptions, - ): Core.APIPromise { - return this._client.post('/audio/transcriptions', Core.multipartFormRequestOptions({ body, ...options })); + ): Core.APIPromise> { + return this._client.post( + '/audio/transcriptions', + Core.multipartFormRequestOptions({ + body, + ...options, + stream: body.stream ?? false, + __metadata: { model: body.model }, + }), + ); + } } @@ -39,8 +68,36 @@ export interface Transcription { /** * The transcribed text. */ text: string; + + /** + * The log probabilities of the tokens in the transcription. Only returned with the + * models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added + * to the `include` array. + */ + logprobs?: Array; } +export namespace Transcription { + export interface Logprob { + /** + * The token in the transcription.
+ */ + token?: string; + + /** + * The bytes of the token. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + +export type TranscriptionInclude = 'logprobs'; + export interface TranscriptionSegment { /** * Unique identifier of the segment. @@ -96,6 +153,103 @@ export interface TranscriptionSegment { tokens: Array; } +/** + * Emitted when there is an additional text delta. This is also the first event + * emitted when the transcription starts. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export type TranscriptionStreamEvent = TranscriptionTextDeltaEvent | TranscriptionTextDoneEvent; + +/** + * Emitted when there is an additional text delta. This is also the first event + * emitted when the transcription starts. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export interface TranscriptionTextDeltaEvent { + /** + * The text delta that was additionally transcribed. + */ + delta: string; + + /** + * The type of the event. Always `transcript.text.delta`. + */ + type: 'transcript.text.delta'; + + /** + * The log probabilities of the delta. Only included if you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `include[]` parameter set to `logprobs`. + */ + logprobs?: Array; +} + +export namespace TranscriptionTextDeltaEvent { + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token?: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + +/** + * Emitted when the transcription is complete. Contains the complete transcription + * text. Only emitted when you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `Stream` parameter set to `true`. + */ +export interface TranscriptionTextDoneEvent { + /** + * The text that was transcribed. + */ + text: string; + + /** + * The type of the event. Always `transcript.text.done`. + */ + type: 'transcript.text.done'; + + /** + * The log probabilities of the individual tokens in the transcription. Only + * included if you + * [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) + * with the `include[]` parameter set to `logprobs`. + */ + logprobs?: Array; +} + +export namespace TranscriptionTextDoneEvent { + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token?: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes?: Array; + + /** + * The log probability of the token. + */ + logprob?: number; + } +} + /** * Represents a verbose json transcription response returned by model, based on the * provided input. @@ -104,7 +258,7 @@ export interface TranscriptionVerbose { /** * The duration of the input audio. */ - duration: string; + duration: number; /** * The language of the input audio. 
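`TranscriptionStreamEvent` is a discriminated union of the delta and done events above, so a `switch` on `type` narrows it cleanly. The handler below is an illustrative sketch, not SDK code; the deep import path follows the source layout:

```ts
import type { TranscriptionStreamEvent } from 'openai/resources/audio/transcriptions';

// Illustrative handler: the `type` discriminant narrows the union.
function handleEvent(event: TranscriptionStreamEvent): void {
  switch (event.type) {
    case 'transcript.text.delta':
      process.stdout.write(event.delta);
      break;
    case 'transcript.text.done':
      console.log('\nfinal:', event.text);
      // Only populated when the request set include: ['logprobs'].
      for (const lp of event.logprobs ?? []) {
        console.log(lp.token, lp.logprob);
      }
      break;
  }
}
```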
@@ -150,7 +304,11 @@ export interface TranscriptionWord { */ export type TranscriptionCreateResponse = Transcription | TranscriptionVerbose; -export interface TranscriptionCreateParams< +export type TranscriptionCreateParams< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> = TranscriptionCreateParamsNonStreaming | TranscriptionCreateParamsStreaming; + +export interface TranscriptionCreateParamsBase< ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, > { /** @@ -160,32 +318,63 @@ export interface TranscriptionCreateParams< file: Core.Uploadable; /** - * ID of the model to use. Only `whisper-1` (which is powered by our open source - * Whisper V2 model) is currently available. + * ID of the model to use. The options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source + * Whisper V2 model). */ model: (string & {}) | AudioAPI.AudioModel; + /** + * Controls how the audio is cut into chunks. When set to `"auto"`, the server + * first normalizes loudness and then uses voice activity detection (VAD) to choose + * boundaries. `server_vad` object can be provided to tweak VAD detection + * parameters manually. If unset, the audio is transcribed as a single block. + */ + chunking_strategy?: 'auto' | TranscriptionCreateParams.VadConfig | null; + + /** + * Additional information to include in the transcription response. `logprobs` will + * return the log probabilities of the tokens in the response to understand the + * model's confidence in the transcription. `logprobs` only works with + * response_format set to `json` and only with the models `gpt-4o-transcribe` and + * `gpt-4o-mini-transcribe`. + */ + include?: Array; + /** * The language of the input audio. Supplying the input language in - * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - * improve accuracy and latency. + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. */ language?: string; /** * An optional text to guide the model's style or continue a previous audio * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) * should match the audio language. */ prompt?: string; /** * The format of the output, in one of these options: `json`, `text`, `srt`, - * `verbose_json`, or `vtt`. + * `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`, + * the only supported format is `json`. */ response_format?: ResponseFormat; + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. + */ + stream?: boolean | null; + /** * The sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the * output more random, while lower values like 0.2 will make it more focused and @@ -205,15 +394,80 @@ export interface TranscriptionCreateParams< timestamp_granularities?: Array<'word' | 'segment'>; } -export namespace Transcriptions { - export import Transcription = TranscriptionsAPI.Transcription; - export import TranscriptionSegment = TranscriptionsAPI.TranscriptionSegment; - export import TranscriptionVerbose = TranscriptionsAPI.TranscriptionVerbose; - export import TranscriptionWord = TranscriptionsAPI.TranscriptionWord; - export import TranscriptionCreateResponse = TranscriptionsAPI.TranscriptionCreateResponse; - export type TranscriptionCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranscriptionsAPI.TranscriptionCreateParams<ResponseFormat>; +export namespace TranscriptionCreateParams { + export interface VadConfig { + /** + * Must be set to `server_vad` to enable manual chunking using server side VAD. + */ + type: 'server_vad'; + + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). With shorter values + * the model will respond more quickly, but may jump in on short pauses from the + * user. + */ + silence_duration_ms?: number; + + /** + * Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + } + + export type TranscriptionCreateParamsNonStreaming = TranscriptionsAPI.TranscriptionCreateParamsNonStreaming; + export type TranscriptionCreateParamsStreaming = TranscriptionsAPI.TranscriptionCreateParamsStreaming; +} + +export interface TranscriptionCreateParamsNonStreaming< + ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = AudioAPI.AudioResponseFormat | undefined, +> extends TranscriptionCreateParamsBase<ResponseFormat> { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored. + */ + stream?: false | null; +} + +export interface TranscriptionCreateParamsStreaming extends TranscriptionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions) + * for more information. + * + * Note: Streaming is not supported for the `whisper-1` model and will be ignored.
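The `VadConfig` knobs map directly onto the request body. A hedged sketch of manual `server_vad` chunking (the numeric values are illustrative choices, not documented defaults):

```ts
import fs from 'fs';
import OpenAI from 'openai';

const client = new OpenAI();

async function transcribeWithVad() {
  return client.audio.transcriptions.create({
    file: fs.createReadStream('meeting.mp3'),
    model: 'gpt-4o-transcribe',
    include: ['logprobs'], // json-only, gpt-4o-* models only (see above)
    chunking_strategy: {
      type: 'server_vad',
      prefix_padding_ms: 300, // audio kept before detected speech
      silence_duration_ms: 500, // silence required to end a chunk
      threshold: 0.6, // raise in noisy environments
    },
  });
}
```

Because `stream` is omitted here, the non-streaming overload applies and the call resolves to a single transcription object rather than an event stream.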
+ */ + stream: true; +} + +export declare namespace Transcriptions { + export { + type Transcription as Transcription, + type TranscriptionInclude as TranscriptionInclude, + type TranscriptionSegment as TranscriptionSegment, + type TranscriptionStreamEvent as TranscriptionStreamEvent, + type TranscriptionTextDeltaEvent as TranscriptionTextDeltaEvent, + type TranscriptionTextDoneEvent as TranscriptionTextDoneEvent, + type TranscriptionVerbose as TranscriptionVerbose, + type TranscriptionWord as TranscriptionWord, + type TranscriptionCreateResponse as TranscriptionCreateResponse, + type TranscriptionCreateParams as TranscriptionCreateParams, + type TranscriptionCreateParamsNonStreaming as TranscriptionCreateParamsNonStreaming, + type TranscriptionCreateParamsStreaming as TranscriptionCreateParamsStreaming, + }; } diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts index 36c2dc7c2..1edb71a7d 100644 --- a/src/resources/audio/translations.ts +++ b/src/resources/audio/translations.ts @@ -2,13 +2,20 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as TranslationsAPI from './translations'; import * as AudioAPI from './audio'; import * as TranscriptionsAPI from './transcriptions'; export class Translations extends APIResource { /** * Translates audio into English. + * + * @example + * ```ts + * const translation = await client.audio.translations.create({ + * file: fs.createReadStream('speech.mp3'), + * model: 'whisper-1', + * }); + * ``` */ create( body: TranslationCreateParams<'json' | undefined>, @@ -27,7 +34,10 @@ export class Translations extends APIResource { body: TranslationCreateParams, options?: Core.RequestOptions, ): Core.APIPromise<TranslationCreateResponse> { - return this._client.post('/audio/translations', Core.multipartFormRequestOptions({ body, ...options })); + return this._client.post( + '/audio/translations', + Core.multipartFormRequestOptions({ body, ...options, __metadata: { model: body.model } }), + ); } } @@ -39,7 +49,7 @@ export interface TranslationVerbose { /** * The duration of the input audio. */ - duration: string; + duration: number; /** * The language of the output translation (always `english`). @@ -77,7 +87,7 @@ export interface TranslationCreateParams< /** * An optional text to guide the model's style or continue a previous audio * segment. The - * [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) * should be in English. */ prompt?: string; @@ -86,7 +96,7 @@ export interface TranslationCreateParams< * The format of the output, in one of these options: `json`, `text`, `srt`, * `verbose_json`, or `vtt`. */ - response_format?: ResponseFormat; + response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt'; /** * The sampling temperature, between 0 and 1.
Higher values like 0.8 will make the @@ -98,13 +108,11 @@ export interface TranslationCreateParams< temperature?: number; } -export namespace Translations { - export import Translation = TranslationsAPI.Translation; - export import TranslationVerbose = TranslationsAPI.TranslationVerbose; - export import TranslationCreateResponse = TranslationsAPI.TranslationCreateResponse; - export type TranslationCreateParams< - ResponseFormat extends AudioAPI.AudioResponseFormat | undefined = - | AudioAPI.AudioResponseFormat - | undefined, - > = TranslationsAPI.TranslationCreateParams<ResponseFormat>; +export declare namespace Translations { + export { + type Translation as Translation, + type TranslationVerbose as TranslationVerbose, + type TranslationCreateResponse as TranslationCreateResponse, + type TranslationCreateParams as TranslationCreateParams, + }; } diff --git a/src/resources/batches.ts b/src/resources/batches.ts index 738582f9e..2cf2ac566 100644 --- a/src/resources/batches.ts +++ b/src/resources/batches.ts @@ -4,6 +4,7 @@ import { APIResource } from '../resource'; import { isRequestOptions } from '../core'; import * as Core from '../core'; import * as BatchesAPI from './batches'; +import * as Shared from './shared'; import { CursorPage, type CursorPageParams } from '../pagination'; export class Batches extends APIResource { @@ -138,11 +139,13 @@ export interface Batch { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The ID of the file containing the outputs of successfully executed requests. @@ -217,11 +220,11 @@ export interface Batch { /** * The endpoint to be used for all requests in the batch. Currently - * `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - * Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - * embedding inputs across all requests in the batch. + * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + * are supported. Note that `/v1/embeddings` batches are also restricted to a + * maximum of 50,000 embedding inputs across all requests in the batch. */ - endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; + endpoint: '/v1/responses' | '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions'; /** * The ID of an uploaded file that contains requests for the new batch. @@ -232,23 +235,32 @@ export interface BatchCreateParams { * Your input file must be formatted as a * [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), * and must be uploaded with the purpose `batch`. The file can contain up to 50,000 - * requests, and can be up to 100 MB in size. + * requests, and can be up to 200 MB in size. */ input_file_id: string; /** - * Optional custom metadata for the batch. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard.
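With `metadata` now typed as the shared 16-pair string map, batch creation can be written directly against the new endpoint union. A sketch (the `completion_window: '24h'` value sits outside this hunk, so treat it as an assumption here, though it is the only window the SDK currently documents):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function createResponsesBatch(inputFileId: string) {
  return client.batches.create({
    endpoint: '/v1/responses', // newly accepted alongside chat/embeddings/completions
    input_file_id: inputFileId, // a JSONL upload with purpose `batch`, up to 200 MB
    completion_window: '24h',
    metadata: { project: 'nightly-eval', owner: 'data-team' }, // string keys/values only
  });
}
```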
+ * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: Record<string, string> | null; + metadata?: Shared.Metadata | null; } export interface BatchListParams extends CursorPageParams {} -export namespace Batches { - export import Batch = BatchesAPI.Batch; - export import BatchError = BatchesAPI.BatchError; - export import BatchRequestCounts = BatchesAPI.BatchRequestCounts; - export import BatchesPage = BatchesAPI.BatchesPage; - export import BatchCreateParams = BatchesAPI.BatchCreateParams; - export import BatchListParams = BatchesAPI.BatchListParams; +Batches.BatchesPage = BatchesPage; + +export declare namespace Batches { + export { + type Batch as Batch, + type BatchError as BatchError, + type BatchRequestCounts as BatchRequestCounts, + BatchesPage as BatchesPage, + type BatchCreateParams as BatchCreateParams, + type BatchListParams as BatchListParams, + }; } diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts index 410d520b0..95581bbc8 100644 --- a/src/resources/beta/assistants.ts +++ b/src/resources/beta/assistants.ts @@ -3,19 +3,24 @@ import { APIResource } from '../../resource'; import { isRequestOptions } from '../../core'; import * as Core from '../../core'; -import * as AssistantsAPI from './assistants'; import * as Shared from '../shared'; -import * as ChatAPI from '../chat/chat'; import * as MessagesAPI from './threads/messages'; import * as ThreadsAPI from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; import * as RunsAPI from './threads/runs/runs'; import * as StepsAPI from './threads/runs/steps'; import { CursorPage, type CursorPageParams } from '../../pagination'; +import { AssistantStream } from '../../lib/AssistantStream'; export class Assistants extends APIResource { /** * Create an assistant with a model and instructions. + * + * @example + * ```ts + * const assistant = await client.beta.assistants.create({ + * model: 'gpt-4o', + * }); + * ``` */ create(body: AssistantCreateParams, options?: Core.RequestOptions): Core.APIPromise<Assistant> { return this._client.post('/assistants', { @@ -27,6 +32,13 @@ /** * Retrieves an assistant. + * + * @example + * ```ts + * const assistant = await client.beta.assistants.retrieve( + * 'assistant_id', + * ); + * ``` */ retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise<Assistant> { return this._client.get(`/assistants/${assistantId}`, { @@ -37,6 +49,13 @@ /** * Modifies an assistant. + * + * @example + * ```ts + * const assistant = await client.beta.assistants.update( + * 'assistant_id', + * ); + * ``` */ update( assistantId: string, @@ -52,6 +71,14 @@ /** * Returns a list of assistants. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const assistant of client.beta.assistants.list()) { + * // ... + * } + * ``` */ list( query?: AssistantListParams, @@ -74,6 +101,13 @@ /** * Delete an assistant.
+ * + * @example + * ```ts + * const assistantDeleted = await client.beta.assistants.del( + * 'assistant_id', + * ); + * ``` */ del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise<AssistantDeleted> { return this._client.delete(`/assistants/${assistantId}`, { @@ -112,18 +146,20 @@ export interface Assistant { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: string; @@ -146,8 +182,8 @@ export interface Assistant { /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -298,6 +334,11 @@ export namespace AssistantStreamEvent { data: ThreadsAPI.Thread; event: 'thread.created'; + + /** + * Whether to enable input audio transcription. + */ + enabled?: boolean; } /** @@ -616,7 +657,7 @@ export namespace AssistantStreamEvent { /** * Occurs when an - * [error](https://platform.openai.com/docs/guides/error-codes/api-errors) occurs. + * [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. * This can happen due to an internal server error or a timeout. */ export interface ErrorEvent { @@ -659,7 +700,7 @@ export namespace FileSearchTool { * * Note that the file search tool may output fewer than `max_num_results` results. * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ max_num_results?: number; @@ -669,7 +710,7 @@ export namespace FileSearchTool { * will use the `auto` ranker and a score_threshold of 0. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ ranking_options?: FileSearch.RankingOptions; @@ -681,7 +722,7 @@ export namespace FileSearchTool { * will use the `auto` ranker and a score_threshold of 0.
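These ranking options nest under the tool definition itself. A small sketch of a tuned `file_search` tool, assuming the `ranker`/`score_threshold` fields documented above (the values are illustrative):

```ts
import type { FileSearchTool } from 'openai/resources/beta/assistants';

// Cap the number of returned chunks and drop low-confidence matches.
const fileSearch: FileSearchTool = {
  type: 'file_search',
  file_search: {
    max_num_results: 8, // the tool may still return fewer
    ranking_options: {
      ranker: 'auto',
      score_threshold: 0.5,
    },
  },
};
```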
* * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ export interface RankingOptions { @@ -1084,6 +1125,11 @@ export interface ThreadStreamEvent { data: ThreadsAPI.Thread; event: 'thread.created'; + + /** + * Whether to enable input audio transcription. + */ + enabled?: boolean; } export interface AssistantCreateParams { /** * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ - model: (string & {}) | ChatAPI.ChatModel; + model: (string & {}) | Shared.ChatModel; /** * The description of the assistant. The maximum length is 512 characters. @@ -1109,21 +1155,33 @@ export interface AssistantCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the assistant. The maximum length is 256 characters. */ name?: string | null; + /** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1221,9 +1279,9 @@ export namespace AssistantCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -1233,12 +1291,53 @@ export namespace AssistantCreateParams { file_ids?: Array<string>; /** - * Set of 16 key-value pairs that can be attached to a vector store.
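An attached vector store is configured inline at assistant creation. A sketch of the wiring (assuming the surrounding `tool_resources.file_search` shape from the SDK, which sits just outside this hunk):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function createSearchAssistant(fileIds: string[]) {
  return client.beta.assistants.create({
    model: 'gpt-4o',
    tools: [{ type: 'file_search' }],
    tool_resources: {
      file_search: {
        vector_stores: [
          {
            file_ids: fileIds,
            // `auto` is the default; a static strategy is defined in the next hunk.
            chunking_strategy: { type: 'auto' },
            metadata: { source: 'onboarding-docs' },
          },
        ],
      },
    },
  });
}
```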
This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. */ - metadata?: unknown; + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } } } } @@ -1258,30 +1357,79 @@ export interface AssistantUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. - */ - model?: string; + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. + */ + model?: + | (string & {}) + | 'gpt-4.1' + | 'gpt-4.1-mini' + | 'gpt-4.1-nano' + | 'gpt-4.1-2025-04-14' + | 'gpt-4.1-mini-2025-04-14' + | 'gpt-4.1-nano-2025-04-14' + | 'o3-mini' + | 'o3-mini-2025-01-31' + | 'o1' + | 'o1-2024-12-17' + | 'gpt-4o' + | 'gpt-4o-2024-11-20' + | 'gpt-4o-2024-08-06' + | 'gpt-4o-2024-05-13' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' + | 'gpt-4.5-preview' + | 'gpt-4.5-preview-2025-02-27' + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; /** * The name of the assistant. The maximum length is 256 characters. 
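For the static strategy defined above, the two token limits travel together; the update params that follow also show the widened `model` union, where `(string & {})` keeps arbitrary strings assignable without losing autocompletion on the listed names. A sketch of an update call (the assistant ID is a placeholder):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function retuneAssistant() {
  return client.beta.assistants.update('asst_abc123', {
    model: 'o3-mini', // pinned snapshots like 'o3-mini-2025-01-31' also typecheck
    reasoning_effort: 'medium', // o-series models only (documented just below)
    name: 'Support triage bot',
  });
}
```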
*/ name?: string | null; + /** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -1374,8 +1522,8 @@ export interface AssistantListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; @@ -1386,20 +1534,26 @@ export interface AssistantListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Assistants { - export import Assistant = AssistantsAPI.Assistant; - export import AssistantDeleted = AssistantsAPI.AssistantDeleted; - export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; - export import AssistantTool = AssistantsAPI.AssistantTool; - export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; - export import FileSearchTool = AssistantsAPI.FileSearchTool; - export import FunctionTool = AssistantsAPI.FunctionTool; - export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; - export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; - export import RunStreamEvent = AssistantsAPI.RunStreamEvent; - export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; - export import AssistantsPage = AssistantsAPI.AssistantsPage; - export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams; - export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; - export import AssistantListParams = AssistantsAPI.AssistantListParams; +Assistants.AssistantsPage = AssistantsPage; + +export declare namespace Assistants { + export { + type Assistant as Assistant, + type AssistantDeleted as AssistantDeleted, + type AssistantStreamEvent as AssistantStreamEvent, + type AssistantTool as AssistantTool, + type CodeInterpreterTool as CodeInterpreterTool, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type MessageStreamEvent as MessageStreamEvent, + type RunStepStreamEvent as RunStepStreamEvent, + type RunStreamEvent as RunStreamEvent, + type ThreadStreamEvent as ThreadStreamEvent, + AssistantsPage as AssistantsPage, + type AssistantCreateParams as AssistantCreateParams, + type AssistantUpdateParams as AssistantUpdateParams, + type AssistantListParams as AssistantListParams, + }; + + export { AssistantStream }; } diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index 0bcf217a8..6282d4593 100644 --- a/src/resources/beta/beta.ts 
+++ b/src/resources/beta/beta.ts @@ -3,60 +3,197 @@ import { APIResource } from '../../resource'; import * as AssistantsAPI from './assistants'; import * as ChatAPI from './chat/chat'; +import { + Assistant, + AssistantCreateParams, + AssistantDeleted, + AssistantListParams, + AssistantStreamEvent, + AssistantTool, + AssistantUpdateParams, + Assistants, + AssistantsPage, + CodeInterpreterTool, + FileSearchTool, + FunctionTool, + MessageStreamEvent, + RunStepStreamEvent, + RunStreamEvent, + ThreadStreamEvent, +} from './assistants'; +import * as RealtimeAPI from './realtime/realtime'; +import { + ConversationCreatedEvent, + ConversationItem, + ConversationItemContent, + ConversationItemCreateEvent, + ConversationItemCreatedEvent, + ConversationItemDeleteEvent, + ConversationItemDeletedEvent, + ConversationItemInputAudioTranscriptionCompletedEvent, + ConversationItemInputAudioTranscriptionDeltaEvent, + ConversationItemInputAudioTranscriptionFailedEvent, + ConversationItemRetrieveEvent, + ConversationItemTruncateEvent, + ConversationItemTruncatedEvent, + ConversationItemWithReference, + ErrorEvent, + InputAudioBufferAppendEvent, + InputAudioBufferClearEvent, + InputAudioBufferClearedEvent, + InputAudioBufferCommitEvent, + InputAudioBufferCommittedEvent, + InputAudioBufferSpeechStartedEvent, + InputAudioBufferSpeechStoppedEvent, + RateLimitsUpdatedEvent, + Realtime, + RealtimeClientEvent, + RealtimeResponse, + RealtimeResponseStatus, + RealtimeResponseUsage, + RealtimeServerEvent, + ResponseAudioDeltaEvent, + ResponseAudioDoneEvent, + ResponseAudioTranscriptDeltaEvent, + ResponseAudioTranscriptDoneEvent, + ResponseCancelEvent, + ResponseContentPartAddedEvent, + ResponseContentPartDoneEvent, + ResponseCreateEvent, + ResponseCreatedEvent, + ResponseDoneEvent, + ResponseFunctionCallArgumentsDeltaEvent, + ResponseFunctionCallArgumentsDoneEvent, + ResponseOutputItemAddedEvent, + ResponseOutputItemDoneEvent, + ResponseTextDeltaEvent, + ResponseTextDoneEvent, + SessionCreatedEvent, + SessionUpdateEvent, + SessionUpdatedEvent, + TranscriptionSessionUpdate, + TranscriptionSessionUpdatedEvent, +} from './realtime/realtime'; import * as ThreadsAPI from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; +import { + AssistantResponseFormatOption, + AssistantToolChoice, + AssistantToolChoiceFunction, + AssistantToolChoiceOption, + Thread, + ThreadCreateAndRunParams, + ThreadCreateAndRunParamsNonStreaming, + ThreadCreateAndRunParamsStreaming, + ThreadCreateAndRunPollParams, + ThreadCreateAndRunStreamParams, + ThreadCreateParams, + ThreadDeleted, + ThreadUpdateParams, + Threads, +} from './threads/threads'; +import { Chat } from './chat/chat'; export class Beta extends APIResource { - vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); + realtime: RealtimeAPI.Realtime = new RealtimeAPI.Realtime(this._client); chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client); threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } -export namespace Beta { - export import VectorStores = VectorStoresAPI.VectorStores; - export import AutoFileChunkingStrategyParam = VectorStoresAPI.AutoFileChunkingStrategyParam; - export import FileChunkingStrategy = VectorStoresAPI.FileChunkingStrategy; - export import FileChunkingStrategyParam = VectorStoresAPI.FileChunkingStrategyParam; - export import OtherFileChunkingStrategyObject = 
VectorStoresAPI.OtherFileChunkingStrategyObject; - export import StaticFileChunkingStrategy = VectorStoresAPI.StaticFileChunkingStrategy; - export import StaticFileChunkingStrategyObject = VectorStoresAPI.StaticFileChunkingStrategyObject; - export import StaticFileChunkingStrategyParam = VectorStoresAPI.StaticFileChunkingStrategyParam; - export import VectorStore = VectorStoresAPI.VectorStore; - export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; - export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; - export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams; - export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams; - export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams; - export import Chat = ChatAPI.Chat; - export import Assistants = AssistantsAPI.Assistants; - export import Assistant = AssistantsAPI.Assistant; - export import AssistantDeleted = AssistantsAPI.AssistantDeleted; - export import AssistantStreamEvent = AssistantsAPI.AssistantStreamEvent; - export import AssistantTool = AssistantsAPI.AssistantTool; - export import CodeInterpreterTool = AssistantsAPI.CodeInterpreterTool; - export import FileSearchTool = AssistantsAPI.FileSearchTool; - export import FunctionTool = AssistantsAPI.FunctionTool; - export import MessageStreamEvent = AssistantsAPI.MessageStreamEvent; - export import RunStepStreamEvent = AssistantsAPI.RunStepStreamEvent; - export import RunStreamEvent = AssistantsAPI.RunStreamEvent; - export import ThreadStreamEvent = AssistantsAPI.ThreadStreamEvent; - export import AssistantsPage = AssistantsAPI.AssistantsPage; - export import AssistantCreateParams = AssistantsAPI.AssistantCreateParams; - export import AssistantUpdateParams = AssistantsAPI.AssistantUpdateParams; - export import AssistantListParams = AssistantsAPI.AssistantListParams; - export import Threads = ThreadsAPI.Threads; - export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; - export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; - export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; - export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption; - export import Thread = ThreadsAPI.Thread; - export import ThreadDeleted = ThreadsAPI.ThreadDeleted; - export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; - export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams; - export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; - export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; - export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; - export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams; - export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; +Beta.Realtime = Realtime; +Beta.Assistants = Assistants; +Beta.AssistantsPage = AssistantsPage; +Beta.Threads = Threads; + +export declare namespace Beta { + export { + Realtime as Realtime, + type ConversationCreatedEvent as ConversationCreatedEvent, + type ConversationItem as ConversationItem, + type ConversationItemContent as ConversationItemContent, + type ConversationItemCreateEvent as ConversationItemCreateEvent, + type ConversationItemCreatedEvent as ConversationItemCreatedEvent, + type ConversationItemDeleteEvent as ConversationItemDeleteEvent, + type 
ConversationItemDeletedEvent as ConversationItemDeletedEvent, + type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, + type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, + type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, + type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent, + type ConversationItemTruncateEvent as ConversationItemTruncateEvent, + type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent, + type ConversationItemWithReference as ConversationItemWithReference, + type ErrorEvent as ErrorEvent, + type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent, + type InputAudioBufferClearEvent as InputAudioBufferClearEvent, + type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent, + type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent, + type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent, + type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, + type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, + type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent, + type RealtimeClientEvent as RealtimeClientEvent, + type RealtimeResponse as RealtimeResponse, + type RealtimeResponseStatus as RealtimeResponseStatus, + type RealtimeResponseUsage as RealtimeResponseUsage, + type RealtimeServerEvent as RealtimeServerEvent, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCancelEvent as ResponseCancelEvent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreateEvent as ResponseCreateEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseDoneEvent as ResponseDoneEvent, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type SessionCreatedEvent as SessionCreatedEvent, + type SessionUpdateEvent as SessionUpdateEvent, + type SessionUpdatedEvent as SessionUpdatedEvent, + type TranscriptionSessionUpdate as TranscriptionSessionUpdate, + type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, + }; + + export { Chat }; + + export { + Assistants as Assistants, + type Assistant as Assistant, + type AssistantDeleted as AssistantDeleted, + type AssistantStreamEvent as AssistantStreamEvent, + type AssistantTool as AssistantTool, + type CodeInterpreterTool as CodeInterpreterTool, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type MessageStreamEvent as MessageStreamEvent, + type RunStepStreamEvent as RunStepStreamEvent, + type RunStreamEvent as RunStreamEvent, + type ThreadStreamEvent as ThreadStreamEvent, + AssistantsPage as AssistantsPage, + type AssistantCreateParams as AssistantCreateParams, + type AssistantUpdateParams as 
AssistantUpdateParams, + type AssistantListParams as AssistantListParams, + }; + + export { + Threads as Threads, + type AssistantResponseFormatOption as AssistantResponseFormatOption, + type AssistantToolChoice as AssistantToolChoice, + type AssistantToolChoiceFunction as AssistantToolChoiceFunction, + type AssistantToolChoiceOption as AssistantToolChoiceOption, + type Thread as Thread, + type ThreadDeleted as ThreadDeleted, + type ThreadCreateParams as ThreadCreateParams, + type ThreadUpdateParams as ThreadUpdateParams, + type ThreadCreateAndRunParams as ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming as ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming as ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, + }; } diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts index 03ea0aab5..083b9914e 100644 --- a/src/resources/beta/chat/completions.ts +++ b/src/resources/beta/chat/completions.ts @@ -3,29 +3,14 @@ import * as Core from '../../../core'; import { APIResource } from '../../../resource'; import { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner'; -export { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from '../../../lib/ChatCompletionRunner'; import { ChatCompletionStreamingRunner, ChatCompletionStreamingFunctionRunnerParams, } from '../../../lib/ChatCompletionStreamingRunner'; -export { - ChatCompletionStreamingRunner, - ChatCompletionStreamingFunctionRunnerParams, -} from '../../../lib/ChatCompletionStreamingRunner'; import { BaseFunctionsArgs } from '../../../lib/RunnableFunction'; -export { - RunnableFunction, - RunnableFunctions, - RunnableFunctionWithParse, - RunnableFunctionWithoutParse, - ParsingFunction, - ParsingToolFunction, -} from '../../../lib/RunnableFunction'; import { RunnerOptions } from '../../../lib/AbstractChatCompletionRunner'; import { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; -export { ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; import { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; -export { ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; import { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; import { ChatCompletion, @@ -34,7 +19,26 @@ import { ChatCompletionMessageToolCall, } from '../../chat/completions'; import { ExtractParsedContentFromParams, parseChatCompletion, validateInputTools } from '../../../lib/parser'; + +export { + ChatCompletionStreamingRunner, + type ChatCompletionStreamingFunctionRunnerParams, +} from '../../../lib/ChatCompletionStreamingRunner'; +export { + type RunnableFunction, + type RunnableFunctions, + type RunnableFunctionWithParse, + type RunnableFunctionWithoutParse, + ParsingFunction, + ParsingToolFunction, +} from '../../../lib/RunnableFunction'; +export { type ChatCompletionToolRunnerParams } from '../../../lib/ChatCompletionRunner'; +export { type ChatCompletionStreamingToolRunnerParams } from '../../../lib/ChatCompletionStreamingRunner'; export { ChatCompletionStream, type ChatCompletionStreamParams } from '../../../lib/ChatCompletionStream'; +export { + ChatCompletionRunner, + type ChatCompletionFunctionRunnerParams, +} from '../../../lib/ChatCompletionRunner'; export interface ParsedFunction extends 
ChatCompletionMessageToolCall.Function { parsed_arguments?: unknown; @@ -46,7 +50,7 @@ export interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall { export interface ParsedChatCompletionMessage<ParsedT> extends ChatCompletionMessage { parsed: ParsedT | null; - tool_calls: Array<ParsedFunctionToolCall>; + tool_calls?: Array<ParsedFunctionToolCall>; } export interface ParsedChoice<ParsedT> extends ChatCompletion.Choice { diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 9fcf805a1..b9cef17cb 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -1,54 +1,39 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - Assistant, - AssistantDeleted, - AssistantStreamEvent, - AssistantTool, - CodeInterpreterTool, - FileSearchTool, - FunctionTool, - MessageStreamEvent, - RunStepStreamEvent, - RunStreamEvent, - ThreadStreamEvent, - AssistantCreateParams, - AssistantUpdateParams, - AssistantListParams, AssistantsPage, Assistants, + type Assistant, + type AssistantDeleted, + type AssistantStreamEvent, + type AssistantTool, + type CodeInterpreterTool, + type FileSearchTool, + type FunctionTool, + type MessageStreamEvent, + type RunStepStreamEvent, + type RunStreamEvent, + type ThreadStreamEvent, + type AssistantCreateParams, + type AssistantUpdateParams, + type AssistantListParams, } from './assistants'; -export { - AssistantResponseFormatOption, - AssistantToolChoice, - AssistantToolChoiceFunction, - AssistantToolChoiceOption, - Thread, - ThreadDeleted, - ThreadCreateParams, - ThreadUpdateParams, - ThreadCreateAndRunParams, - ThreadCreateAndRunParamsNonStreaming, - ThreadCreateAndRunParamsStreaming, - ThreadCreateAndRunPollParams, - ThreadCreateAndRunStreamParams, - Threads, -} from './threads/index'; export { Beta } from './beta'; +export { Realtime } from './realtime/index'; export { Chat } from './chat/index'; export { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyParam, - VectorStore, - VectorStoreDeleted, - VectorStoreCreateParams, - VectorStoreUpdateParams, - VectorStoreListParams, - VectorStoresPage, - VectorStores, -} from './vector-stores/index'; + Threads, + type AssistantResponseFormatOption, + type AssistantToolChoice, + type AssistantToolChoiceFunction, + type AssistantToolChoiceOption, + type Thread, + type ThreadDeleted, + type ThreadCreateParams, + type ThreadUpdateParams, + type ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, +} from './threads/index'; diff --git a/src/resources/beta/realtime/index.ts b/src/resources/beta/realtime/index.ts new file mode 100644 index 000000000..ba51d8a66 --- /dev/null +++ b/src/resources/beta/realtime/index.ts @@ -0,0 +1,9 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
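Note the behavioural consequence of `tool_calls` becoming optional in the hunk above: downstream code must guard before iterating. A sketch against the parsed-message type:

```ts
import type { ParsedChatCompletionMessage } from 'openai/resources/beta/chat/completions';

function printToolCalls(message: ParsedChatCompletionMessage<unknown>) {
  // `tool_calls` is now `?:`, so default to an empty array before iterating.
  for (const call of message.tool_calls ?? []) {
    console.log(call.function.name, call.function.parsed_arguments);
  }
}
```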
+ +export { Realtime } from './realtime'; +export { Sessions, type Session, type SessionCreateResponse, type SessionCreateParams } from './sessions'; +export { + TranscriptionSessions, + type TranscriptionSession, + type TranscriptionSessionCreateParams, +} from './transcription-sessions'; diff --git a/src/resources/beta/realtime/realtime.ts b/src/resources/beta/realtime/realtime.ts new file mode 100644 index 000000000..26fba883e --- /dev/null +++ b/src/resources/beta/realtime/realtime.ts @@ -0,0 +1,2653 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as RealtimeAPI from './realtime'; +import * as Shared from '../../shared'; +import * as SessionsAPI from './sessions'; +import { + Session as SessionsAPISession, + SessionCreateParams, + SessionCreateResponse, + Sessions, +} from './sessions'; +import * as TranscriptionSessionsAPI from './transcription-sessions'; +import { + TranscriptionSession, + TranscriptionSessionCreateParams, + TranscriptionSessions, +} from './transcription-sessions'; + +export class Realtime extends APIResource { + sessions: SessionsAPI.Sessions = new SessionsAPI.Sessions(this._client); + transcriptionSessions: TranscriptionSessionsAPI.TranscriptionSessions = + new TranscriptionSessionsAPI.TranscriptionSessions(this._client); +} + +/** + * Returned when a conversation is created. Emitted right after session creation. + */ +export interface ConversationCreatedEvent { + /** + * The conversation resource. + */ + conversation: ConversationCreatedEvent.Conversation; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The event type, must be `conversation.created`. + */ + type: 'conversation.created'; +} + +export namespace ConversationCreatedEvent { + /** + * The conversation resource. + */ + export interface Conversation { + /** + * The unique ID of the conversation. + */ + id?: string; + + /** + * The object type, must be `realtime.conversation`. + */ + object?: 'realtime.conversation'; + } +} + +/** + * The item to add to the conversation. + */ +export interface ConversationItem { + /** + * The unique ID of the item; this can be generated by the client to help manage + * server-side context, but is not required because the server will generate one if + * not provided. + */ + id?: string; + + /** + * The arguments of the function call (for `function_call` items). + */ + arguments?: string; + + /** + * The ID of the function call (for `function_call` and `function_call_output` + * items). If passed on a `function_call_output` item, the server will check that a + * `function_call` item with the same ID exists in the conversation history. + */ + call_id?: string; + + /** + * The content of the message, applicable for `message` items. + * + * - Message items of role `system` support only `input_text` content + * - Message items of role `user` support `input_text` and `input_audio` content + * - Message items of role `assistant` support `text` content. + */ + content?: Array<ConversationItemContent>; + + /** + * The name of the function being called (for `function_call` items). + */ + name?: string; + + /** + * Identifier for the API object being returned - always `realtime.item`. + */ + object?: 'realtime.item'; + + /** + * The output of the function call (for `function_call_output` items). + */ + output?: string; + + /** + * The role of the message sender (`user`, `assistant`, `system`), only applicable + * for `message` items.
+ */ + role?: 'user' | 'assistant' | 'system'; + + /** + * The status of the item (`completed`, `incomplete`). These have no effect on the + * conversation, but are accepted for consistency with the + * `conversation.item.created` event. + */ + status?: 'completed' | 'incomplete'; + + /** + * The type of the item (`message`, `function_call`, `function_call_output`). + */ + type?: 'message' | 'function_call' | 'function_call_output'; +} + +export interface ConversationItemContent { + /** + * ID of a previous conversation item to reference (for `item_reference` content + * types in `response.create` events). These can reference both client and server + * created items. + */ + id?: string; + + /** + * Base64-encoded audio bytes, used for `input_audio` content type. + */ + audio?: string; + + /** + * The text content, used for `input_text` and `text` content types. + */ + text?: string; + + /** + * The transcript of the audio, used for `input_audio` content type. + */ + transcript?: string; + + /** + * The content type (`input_text`, `input_audio`, `item_reference`, `text`). + */ + type?: 'input_text' | 'input_audio' | 'item_reference' | 'text'; +} + +/** + * Add a new Item to the Conversation's context, including messages, function + * calls, and function call responses. This event can be used both to populate a + * "history" of the conversation and to add new items mid-stream, but has the + * current limitation that it cannot populate assistant audio messages. + * + * If successful, the server will respond with a `conversation.item.created` event, + * otherwise an `error` event will be sent. + */ +export interface ConversationItemCreateEvent { + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The event type, must be `conversation.item.create`. + */ + type: 'conversation.item.create'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; + + /** + * The ID of the preceding item after which the new item will be inserted. If not + * set, the new item will be appended to the end of the conversation. If set to + * `root`, the new item will be added to the beginning of the conversation. If set + * to an existing ID, it allows an item to be inserted mid-conversation. If the ID + * cannot be found, an error will be returned and the item will not be added. + */ + previous_item_id?: string; +} + +/** + * Returned when a conversation item is created. There are several scenarios that + * produce this event: + * + * - The server is generating a Response, which if successful will produce either + * one or two Items, which will be of type `message` (role `assistant`) or type + * `function_call`. + * - The input audio buffer has been committed, either by the client or the server + * (in `server_vad` mode). The server will take the content of the input audio + * buffer and add it to a new user message Item. + * - The client has sent a `conversation.item.create` event to add a new Item to + * the Conversation. + */ +export interface ConversationItemCreatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The ID of the preceding item in the Conversation context, allows the client to + * understand the order of the conversation. + */ + previous_item_id: string; + + /** + * The event type, must be `conversation.item.created`. 
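Client events like these are plain JSON frames. A sketch that seeds conversation history with a `conversation.item.create` event (the transport is assumed to be any WebSocket-style `send`):

```ts
import type { ConversationItemCreateEvent } from 'openai/resources/beta/realtime/realtime';

// Insert a user message at the very start of the conversation history.
const createItem: ConversationItemCreateEvent = {
  type: 'conversation.item.create',
  previous_item_id: 'root', // 'root' prepends; omit to append at the end
  item: {
    type: 'message',
    role: 'user',
    content: [{ type: 'input_text', text: 'Summarize the last call.' }],
  },
};

// socket.send(JSON.stringify(createItem)); // ws-style transport, assumed
```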
+ */ + type: 'conversation.item.created'; +} + +/** + * Send this event when you want to remove any item from the conversation history. + * The server will respond with a `conversation.item.deleted` event, unless the + * item does not exist in the conversation history, in which case the server will + * respond with an error. + */ +export interface ConversationItemDeleteEvent { + /** + * The ID of the item to delete. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.delete`. + */ + type: 'conversation.item.delete'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Returned when an item in the conversation is deleted by the client with a + * `conversation.item.delete` event. This event is used to synchronize the server's + * understanding of the conversation history with the client's view. + */ +export interface ConversationItemDeletedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item that was deleted. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.deleted`. + */ + type: 'conversation.item.deleted'; +} + +/** + * This event is the output of audio transcription for user audio written to the + * user audio buffer. Transcription begins when the input audio buffer is committed + * by the client or server (in `server_vad` mode). Transcription runs + * asynchronously with Response creation, so this event may come before or after + * the Response events. + * + * Realtime API models accept audio natively, and thus input transcription is a + * separate process run on a separate ASR (Automatic Speech Recognition) model, + * currently always `whisper-1`. Thus the transcript may diverge somewhat from the + * model's interpretation, and should be treated as a rough guide. + */ +export interface ConversationItemInputAudioTranscriptionCompletedEvent { + /** + * The index of the content part containing the audio. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item containing the audio. + */ + item_id: string; + + /** + * The transcribed text. + */ + transcript: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.completed`. + */ + type: 'conversation.item.input_audio_transcription.completed'; + + /** + * The log probabilities of the transcription. + */ + logprobs?: Array<ConversationItemInputAudioTranscriptionCompletedEvent.Logprob> | null; +} + +export namespace ConversationItemInputAudioTranscriptionCompletedEvent { + /** + * A log probability object. + */ + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes: Array<number>; + + /** + * The log probability of the token. + */ + logprob: number; + } +} + +/** + * Returned when the text value of an input audio transcription content part is + * updated. + */ +export interface ConversationItemInputAudioTranscriptionDeltaEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.delta`. + */ + type: 'conversation.item.input_audio_transcription.delta'; + + /** + * The index of the content part in the item's content array. + */ + content_index?: number; + + /** + * The text delta.
+ */ + delta?: string; + + /** + * The log probabilities of the transcription. + */ + logprobs?: Array<ConversationItemInputAudioTranscriptionDeltaEvent.Logprob> | null; +} + +export namespace ConversationItemInputAudioTranscriptionDeltaEvent { + /** + * A log probability object. + */ + export interface Logprob { + /** + * The token that was used to generate the log probability. + */ + token: string; + + /** + * The bytes that were used to generate the log probability. + */ + bytes: Array<number>; + + /** + * The log probability of the token. + */ + logprob: number; + } +} + +/** + * Returned when input audio transcription is configured, and a transcription + * request for a user message failed. These events are separate from other `error` + * events so that the client can identify the related Item. + */ +export interface ConversationItemInputAudioTranscriptionFailedEvent { + /** + * The index of the content part containing the audio. + */ + content_index: number; + + /** + * Details of the transcription error. + */ + error: ConversationItemInputAudioTranscriptionFailedEvent.Error; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the user message item. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.input_audio_transcription.failed`. + */ + type: 'conversation.item.input_audio_transcription.failed'; +} + +export namespace ConversationItemInputAudioTranscriptionFailedEvent { + /** + * Details of the transcription error. + */ + export interface Error { + /** + * Error code, if any. + */ + code?: string; + + /** + * A human-readable error message. + */ + message?: string; + + /** + * Parameter related to the error, if any. + */ + param?: string; + + /** + * The type of error. + */ + type?: string; + } +} + +/** + * Send this event when you want to retrieve the server's representation of a + * specific item in the conversation history. This is useful, for example, to + * inspect user audio after noise cancellation and VAD. The server will respond + * with a `conversation.item.retrieved` event, unless the item does not exist in + * the conversation history, in which case the server will respond with an error. + */ +export interface ConversationItemRetrieveEvent { + /** + * The ID of the item to retrieve. + */ + item_id: string; + + /** + * The event type, must be `conversation.item.retrieve`. + */ + type: 'conversation.item.retrieve'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; +} + +/** + * Send this event to truncate a previous assistant message’s audio. The server + * will produce audio faster than realtime, so this event is useful when the user + * interrupts to truncate audio that has already been sent to the client but not + * yet played. This will synchronize the server's understanding of the audio with + * the client's playback. + * + * Truncating audio will delete the server-side text transcript to ensure there is + * no text in the context that hasn't been heard by the user. + * + * If successful, the server will respond with a `conversation.item.truncated` + * event. + */ +export interface ConversationItemTruncateEvent { + /** + * Inclusive duration up to which audio is truncated, in milliseconds. If the + * audio_end_ms is greater than the actual audio duration, the server will respond + * with an error. + */ + audio_end_ms: number; + + /** + * The index of the content part to truncate. Set this to 0. + */ + content_index: number; + + /** + * The ID of the assistant message item to truncate.
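A typical use of truncation is barge-in handling: when the user interrupts, cut the assistant audio back to what was actually played. A sketch building the event (IDs and timings are caller-supplied):

```ts
import type { ConversationItemTruncateEvent } from 'openai/resources/beta/realtime/realtime';

function truncateTo(itemId: string, playedMs: number): ConversationItemTruncateEvent {
  return {
    type: 'conversation.item.truncate',
    item_id: itemId, // must be an assistant message item
    content_index: 0, // per the docs, always 0
    audio_end_ms: playedMs, // the server errors if this exceeds the audio duration
  };
}
```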
+ */
+export interface ConversationItemTruncateEvent {
+  /**
+   * Inclusive duration up to which audio is truncated, in milliseconds. If the
+   * audio_end_ms is greater than the actual audio duration, the server will respond
+   * with an error.
+   */
+  audio_end_ms: number;
+
+  /**
+   * The index of the content part to truncate. Set this to 0.
+   */
+  content_index: number;
+
+  /**
+   * The ID of the assistant message item to truncate. Only assistant message items
+   * can be truncated.
+   */
+  item_id: string;
+
+  /**
+   * The event type, must be `conversation.item.truncate`.
+   */
+  type: 'conversation.item.truncate';
+
+  /**
+   * Optional client-generated ID used to identify this event.
+   */
+  event_id?: string;
+}
+
+/**
+ * Returned when an earlier assistant audio message item is truncated by the client
+ * with a `conversation.item.truncate` event. This event is used to synchronize the
+ * server's understanding of the audio with the client's playback.
+ *
+ * This action will truncate the audio and remove the server-side text transcript
+ * to ensure there is no text in the context that hasn't been heard by the user.
+ */
+export interface ConversationItemTruncatedEvent {
+  /**
+   * The duration up to which the audio was truncated, in milliseconds.
+   */
+  audio_end_ms: number;
+
+  /**
+   * The index of the content part that was truncated.
+   */
+  content_index: number;
+
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The ID of the assistant message item that was truncated.
+   */
+  item_id: string;
+
+  /**
+   * The event type, must be `conversation.item.truncated`.
+   */
+  type: 'conversation.item.truncated';
+}
+
+/**
+ * The item to add to the conversation.
+ */
+export interface ConversationItemWithReference {
+  /**
+   * For an item of type (`message` | `function_call` | `function_call_output`) this
+   * field allows the client to assign the unique ID of the item. It is not required
+   * because the server will generate one if not provided.
+   *
+   * For an item of type `item_reference`, this field is required and is a reference
+   * to any item that has previously existed in the conversation.
+   */
+  id?: string;
+
+  /**
+   * The arguments of the function call (for `function_call` items).
+   */
+  arguments?: string;
+
+  /**
+   * The ID of the function call (for `function_call` and `function_call_output`
+   * items). If passed on a `function_call_output` item, the server will check that a
+   * `function_call` item with the same ID exists in the conversation history.
+   */
+  call_id?: string;
+
+  /**
+   * The content of the message, applicable for `message` items.
+   *
+   * - Message items of role `system` support only `input_text` content
+   * - Message items of role `user` support `input_text` and `input_audio` content
+   * - Message items of role `assistant` support `text` content.
+   */
+  content?: Array<ConversationItemContent>;
+
+  /**
+   * The name of the function being called (for `function_call` items).
+   */
+  name?: string;
+
+  /**
+   * Identifier for the API object being returned - always `realtime.item`.
+   */
+  object?: 'realtime.item';
+
+  /**
+   * The output of the function call (for `function_call_output` items).
+   */
+  output?: string;
+
+  /**
+   * The role of the message sender (`user`, `assistant`, `system`), only applicable
+   * for `message` items.
+   */
+  role?: 'user' | 'assistant' | 'system';
+
+  /**
+   * The status of the item (`completed`, `incomplete`). These have no effect on the
+   * conversation, but are accepted for consistency with the
+   * `conversation.item.created` event.
+   */
+  status?: 'completed' | 'incomplete';
+
+  /**
+   * The type of the item (`message`, `function_call`, `function_call_output`,
+   * `item_reference`).
+   */
+  type?: 'message' | 'function_call' | 'function_call_output' | 'item_reference';
+}
+
+/**
+ * Returned when an error occurs, which could be a client problem or a server
+ * problem. Most errors are recoverable and the session will stay open; we
+ * recommend that implementations monitor and log error messages by default.
+ */
+export interface ErrorEvent {
+  /**
+   * Details of the error.
+   */
+  error: ErrorEvent.Error;
+
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The event type, must be `error`.
+   */
+  type: 'error';
+}
+
+export namespace ErrorEvent {
+  /**
+   * Details of the error.
+   */
+  export interface Error {
+    /**
+     * A human-readable error message.
+     */
+    message: string;
+
+    /**
+     * The type of error (e.g., "invalid_request_error", "server_error").
+     */
+    type: string;
+
+    /**
+     * Error code, if any.
+     */
+    code?: string | null;
+
+    /**
+     * The event_id of the client event that caused the error, if applicable.
+     */
+    event_id?: string | null;
+
+    /**
+     * Parameter related to the error, if any.
+     */
+    param?: string | null;
+  }
+}
+
+/**
+ * Send this event to append audio bytes to the input audio buffer. The audio
+ * buffer is temporary storage you can write to and later commit. In Server VAD
+ * mode, the audio buffer is used to detect speech and the server will decide when
+ * to commit. When Server VAD is disabled, you must commit the audio buffer
+ * manually.
+ *
+ * The client may choose how much audio to place in each event up to a maximum of
+ * 15 MiB; for example, streaming smaller chunks from the client may allow the VAD
+ * to be more responsive. Unlike most other client events, the server will not send
+ * a confirmation response to this event.
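+ *
+ * A rough sketch of streaming captured audio (assuming an established Realtime
+ * WebSocket `ws`, a PCM16 `chunk` from the microphone, and Node's `Buffer` for
+ * base64 encoding; chunk sizing is illustrative):
+ *
+ * @example
+ * ```ts
+ * const append: InputAudioBufferAppendEvent = {
+ *   type: 'input_audio_buffer.append',
+ *   // Must be base64-encoded audio in the session's `input_audio_format`.
+ *   audio: Buffer.from(chunk).toString('base64'),
+ * };
+ * ws.send(JSON.stringify(append));
+ * ```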
+ */
+export interface InputAudioBufferAppendEvent {
+  /**
+   * Base64-encoded audio bytes. This must be in the format specified by the
+   * `input_audio_format` field in the session configuration.
+   */
+  audio: string;
+
+  /**
+   * The event type, must be `input_audio_buffer.append`.
+   */
+  type: 'input_audio_buffer.append';
+
+  /**
+   * Optional client-generated ID used to identify this event.
+   */
+  event_id?: string;
+}
+
+/**
+ * Send this event to clear the audio bytes in the buffer. The server will respond
+ * with an `input_audio_buffer.cleared` event.
+ */
+export interface InputAudioBufferClearEvent {
+  /**
+   * The event type, must be `input_audio_buffer.clear`.
+   */
+  type: 'input_audio_buffer.clear';
+
+  /**
+   * Optional client-generated ID used to identify this event.
+   */
+  event_id?: string;
+}
+
+/**
+ * Returned when the input audio buffer is cleared by the client with an
+ * `input_audio_buffer.clear` event.
+ */
+export interface InputAudioBufferClearedEvent {
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The event type, must be `input_audio_buffer.cleared`.
+   */
+  type: 'input_audio_buffer.cleared';
+}
+
+/**
+ * Send this event to commit the user input audio buffer, which will create a new
+ * user message item in the conversation. This event will produce an error if the
+ * input audio buffer is empty. When in Server VAD mode, the client does not need
+ * to send this event; the server will commit the audio buffer automatically.
+ *
+ * Committing the input audio buffer will trigger input audio transcription (if
+ * enabled in session configuration), but it will not create a response from the
+ * model. The server will respond with an `input_audio_buffer.committed` event.
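+ *
+ * A minimal sketch of committing buffered audio when managing turns manually
+ * (assuming an established Realtime WebSocket `ws` with server VAD disabled):
+ *
+ * @example
+ * ```ts
+ * // Not needed in `server_vad` mode, where the server commits automatically.
+ * ws.send(JSON.stringify({ type: 'input_audio_buffer.commit' }));
+ * ```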
+ */
+export interface InputAudioBufferCommitEvent {
+  /**
+   * The event type, must be `input_audio_buffer.commit`.
+   */
+  type: 'input_audio_buffer.commit';
+
+  /**
+   * Optional client-generated ID used to identify this event.
+   */
+  event_id?: string;
+}
+
+/**
+ * Returned when an input audio buffer is committed, either by the client or
+ * automatically in server VAD mode. The `item_id` property is the ID of the user
+ * message item that will be created, thus a `conversation.item.created` event will
+ * also be sent to the client.
+ */
+export interface InputAudioBufferCommittedEvent {
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The ID of the user message item that will be created.
+   */
+  item_id: string;
+
+  /**
+   * The ID of the preceding item after which the new item will be inserted.
+   */
+  previous_item_id: string;
+
+  /**
+   * The event type, must be `input_audio_buffer.committed`.
+   */
+  type: 'input_audio_buffer.committed';
+}
+
+/**
+ * Sent by the server when in `server_vad` mode to indicate that speech has been
+ * detected in the audio buffer. This can happen any time audio is added to the
+ * buffer (unless speech is already detected). The client may want to use this
+ * event to interrupt audio playback or provide visual feedback to the user.
+ *
+ * The client should expect to receive an `input_audio_buffer.speech_stopped` event
+ * when speech stops. The `item_id` property is the ID of the user message item
+ * that will be created when speech stops and will also be included in the
+ * `input_audio_buffer.speech_stopped` event (unless the client manually commits
+ * the audio buffer during VAD activation).
+ */
+export interface InputAudioBufferSpeechStartedEvent {
+  /**
+   * Milliseconds from the start of all audio written to the buffer during the
+   * session when speech was first detected. This will correspond to the beginning of
+   * audio sent to the model, and thus includes the `prefix_padding_ms` configured in
+   * the Session.
+   */
+  audio_start_ms: number;
+
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The ID of the user message item that will be created when speech stops.
+   */
+  item_id: string;
+
+  /**
+   * The event type, must be `input_audio_buffer.speech_started`.
+   */
+  type: 'input_audio_buffer.speech_started';
+}
+
+/**
+ * Returned in `server_vad` mode when the server detects the end of speech in the
+ * audio buffer. The server will also send a `conversation.item.created` event
+ * with the user message item that is created from the audio buffer.
+ */
+export interface InputAudioBufferSpeechStoppedEvent {
+  /**
+   * Milliseconds since the session started when speech stopped. This will correspond
+   * to the end of audio sent to the model, and thus includes the
+   * `min_silence_duration_ms` configured in the Session.
+   */
+  audio_end_ms: number;
+
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The ID of the user message item that will be created.
+   */
+  item_id: string;
+
+  /**
+   * The event type, must be `input_audio_buffer.speech_stopped`.
+   */
+  type: 'input_audio_buffer.speech_stopped';
+}
+
+/**
+ * Emitted at the beginning of a Response to indicate the updated rate limits. When
+ * a Response is created some tokens will be "reserved" for the output tokens; the
+ * rate limits shown here reflect that reservation, which is then adjusted
+ * accordingly once the Response is completed.
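+ *
+ * A rough sketch of reacting to the updated limits (assuming `event` is an
+ * already-parsed `RateLimitsUpdatedEvent`; the threshold is illustrative):
+ *
+ * @example
+ * ```ts
+ * for (const limit of event.rate_limits) {
+ *   if (limit.name === 'tokens' && (limit.remaining ?? 0) < 1000) {
+ *     console.warn(`Token budget low; resets in ${limit.reset_seconds}s`);
+ *   }
+ * }
+ * ```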
+ */
+export interface RateLimitsUpdatedEvent {
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * List of rate limit information.
+   */
+  rate_limits: Array<RateLimitsUpdatedEvent.RateLimit>;
+
+  /**
+   * The event type, must be `rate_limits.updated`.
+   */
+  type: 'rate_limits.updated';
+}
+
+export namespace RateLimitsUpdatedEvent {
+  export interface RateLimit {
+    /**
+     * The maximum allowed value for the rate limit.
+     */
+    limit?: number;
+
+    /**
+     * The name of the rate limit (`requests`, `tokens`).
+     */
+    name?: 'requests' | 'tokens';
+
+    /**
+     * The remaining value before the limit is reached.
+     */
+    remaining?: number;
+
+    /**
+     * Seconds until the rate limit resets.
+     */
+    reset_seconds?: number;
+  }
+}
+
+/**
+ * A realtime client event.
+ */
+export type RealtimeClientEvent =
+  | ConversationItemCreateEvent
+  | ConversationItemDeleteEvent
+  | ConversationItemRetrieveEvent
+  | ConversationItemTruncateEvent
+  | InputAudioBufferAppendEvent
+  | InputAudioBufferClearEvent
+  | RealtimeClientEvent.OutputAudioBufferClear
+  | InputAudioBufferCommitEvent
+  | ResponseCancelEvent
+  | ResponseCreateEvent
+  | SessionUpdateEvent
+  | TranscriptionSessionUpdate;
+
+export namespace RealtimeClientEvent {
+  /**
+   * **WebRTC Only:** Emit to cut off the current audio response. This will trigger
+   * the server to stop generating audio and emit an `output_audio_buffer.cleared`
+   * event. This event should be preceded by a `response.cancel` client event to stop
+   * the generation of the current response.
+   * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc).
+   */
+  export interface OutputAudioBufferClear {
+    /**
+     * The event type, must be `output_audio_buffer.clear`.
+     */
+    type: 'output_audio_buffer.clear';
+
+    /**
+     * The unique ID of the client event used for error handling.
+     */
+    event_id?: string;
+  }
+}
+
+/**
+ * The response resource.
+ */
+export interface RealtimeResponse {
+  /**
+   * The unique ID of the response.
+   */
+  id?: string;
+
+  /**
+   * Which conversation the response is added to, determined by the `conversation`
+   * field in the `response.create` event. If `auto`, the response will be added to
+   * the default conversation and the value of `conversation_id` will be an id like
+   * `conv_1234`. If `none`, the response will not be added to any conversation and
+   * the value of `conversation_id` will be `null`. If responses are being triggered
+   * by server VAD, the response will be added to the default conversation, thus the
+   * `conversation_id` will be an id like `conv_1234`.
+   */
+  conversation_id?: string;
+
+  /**
+   * Maximum number of output tokens for a single assistant response, inclusive of
+   * tool calls, that was used in this response.
+   */
+  max_output_tokens?: number | 'inf';
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful
+   * for storing additional information about the object in a structured format, and
+   * querying for objects via API or the dashboard.
+   *
+   * Keys are strings with a maximum length of 64 characters. Values are strings with
+   * a maximum length of 512 characters.
+   */
+  metadata?: Shared.Metadata | null;
+
+  /**
+   * The set of modalities the model used to respond. If there are multiple
+   * modalities, the model will pick one, for example if `modalities` is
+   * `["text", "audio"]`, the model could be responding in either text or audio.
+   */
+  modalities?: Array<'text' | 'audio'>;
+
+  /**
+   * The object type, must be `realtime.response`.
+   */
+  object?: 'realtime.response';
+
+  /**
+   * The list of output items generated by the response.
+   */
+  output?: Array<ConversationItem>;
+
+  /**
+   * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+   */
+  output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+  /**
+   * The final status of the response (`completed`, `cancelled`, `failed`, or
+   * `incomplete`).
+   */
+  status?: 'completed' | 'cancelled' | 'failed' | 'incomplete';
+
+  /**
+   * Additional details about the status.
+   */
+  status_details?: RealtimeResponseStatus;
+
+  /**
+   * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
+   */
+  temperature?: number;
+
+  /**
+   * Usage statistics for the Response; this will correspond to billing. A Realtime
+   * API session will maintain a conversation context and append new Items to the
+   * Conversation, thus output from previous turns (text and audio tokens) will
+   * become the input for later turns.
+   */
+  usage?: RealtimeResponseUsage;
+
+  /**
+   * The voice the model used to respond. Current voice options are `alloy`, `ash`,
+   * `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
+   * `verse`.
+   */
+  voice?:
+    | (string & {})
+    | 'alloy'
+    | 'ash'
+    | 'ballad'
+    | 'coral'
+    | 'echo'
+    | 'fable'
+    | 'onyx'
+    | 'nova'
+    | 'sage'
+    | 'shimmer'
+    | 'verse';
+}
+
+/**
+ * Additional details about the status.
+ */
+export interface RealtimeResponseStatus {
+  /**
+   * A description of the error that caused the response to fail, populated when the
+   * `status` is `failed`.
+   */
+  error?: RealtimeResponseStatus.Error;
+
+  /**
+   * The reason the Response did not complete. For a `cancelled` Response, one of
+   * `turn_detected` (the server VAD detected a new start of speech) or
+   * `client_cancelled` (the client sent a cancel event). For an `incomplete`
+   * Response, one of `max_output_tokens` or `content_filter` (the server-side safety
+   * filter activated and cut off the response).
+   */
+  reason?: 'turn_detected' | 'client_cancelled' | 'max_output_tokens' | 'content_filter';
+
+  /**
+   * The type of error that caused the response to fail, corresponding with the
+   * `status` field (`completed`, `cancelled`, `incomplete`, `failed`).
+   */
+  type?: 'completed' | 'cancelled' | 'incomplete' | 'failed';
+}
+
+export namespace RealtimeResponseStatus {
+  /**
+   * A description of the error that caused the response to fail, populated when the
+   * `status` is `failed`.
+   */
+  export interface Error {
+    /**
+     * Error code, if any.
+     */
+    code?: string;
+
+    /**
+     * The type of error.
+     */
+    type?: string;
+  }
+}
+
+/**
+ * Usage statistics for the Response; this will correspond to billing. A Realtime
+ * API session will maintain a conversation context and append new Items to the
+ * Conversation, thus output from previous turns (text and audio tokens) will
+ * become the input for later turns.
+ */
+export interface RealtimeResponseUsage {
+  /**
+   * Details about the input tokens used in the Response.
+   */
+  input_token_details?: RealtimeResponseUsage.InputTokenDetails;
+
+  /**
+   * The number of input tokens used in the Response, including text and audio
+   * tokens.
+   */
+  input_tokens?: number;
+
+  /**
+   * Details about the output tokens used in the Response.
+   */
+  output_token_details?: RealtimeResponseUsage.OutputTokenDetails;
+
+  /**
+   * The number of output tokens sent in the Response, including text and audio
+   * tokens.
+   */
+  output_tokens?: number;
+
+  /**
+   * The total number of tokens in the Response including input and output text and
+   * audio tokens.
+   */
+  total_tokens?: number;
+}
+
+export namespace RealtimeResponseUsage {
+  /**
+   * Details about the input tokens used in the Response.
+ */ + export interface InputTokenDetails { + /** + * The number of audio tokens used in the Response. + */ + audio_tokens?: number; + + /** + * The number of cached tokens used in the Response. + */ + cached_tokens?: number; + + /** + * The number of text tokens used in the Response. + */ + text_tokens?: number; + } + + /** + * Details about the output tokens used in the Response. + */ + export interface OutputTokenDetails { + /** + * The number of audio tokens used in the Response. + */ + audio_tokens?: number; + + /** + * The number of text tokens used in the Response. + */ + text_tokens?: number; + } +} + +/** + * A realtime server event. + */ +export type RealtimeServerEvent = + | ConversationCreatedEvent + | ConversationItemCreatedEvent + | ConversationItemDeletedEvent + | ConversationItemInputAudioTranscriptionCompletedEvent + | ConversationItemInputAudioTranscriptionDeltaEvent + | ConversationItemInputAudioTranscriptionFailedEvent + | RealtimeServerEvent.ConversationItemRetrieved + | ConversationItemTruncatedEvent + | ErrorEvent + | InputAudioBufferClearedEvent + | InputAudioBufferCommittedEvent + | InputAudioBufferSpeechStartedEvent + | InputAudioBufferSpeechStoppedEvent + | RateLimitsUpdatedEvent + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseDoneEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | SessionCreatedEvent + | SessionUpdatedEvent + | TranscriptionSessionUpdatedEvent + | RealtimeServerEvent.OutputAudioBufferStarted + | RealtimeServerEvent.OutputAudioBufferStopped + | RealtimeServerEvent.OutputAudioBufferCleared; + +export namespace RealtimeServerEvent { + /** + * Returned when a conversation item is retrieved with + * `conversation.item.retrieve`. + */ + export interface ConversationItemRetrieved { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: RealtimeAPI.ConversationItem; + + /** + * The event type, must be `conversation.item.retrieved`. + */ + type: 'conversation.item.retrieved'; + } + + /** + * **WebRTC Only:** Emitted when the server begins streaming audio to the client. + * This event is emitted after an audio content part has been added + * (`response.content_part.added`) to the response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferStarted { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.started`. + */ + type: 'output_audio_buffer.started'; + } + + /** + * **WebRTC Only:** Emitted when the output audio buffer has been completely + * drained on the server, and no more audio is forthcoming. This event is emitted + * after the full response data has been sent to the client (`response.done`). + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferStopped { + /** + * The unique ID of the server event. 
+ */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.stopped`. + */ + type: 'output_audio_buffer.stopped'; + } + + /** + * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens + * either in VAD mode when the user has interrupted + * (`input_audio_buffer.speech_started`), or when the client has emitted the + * `output_audio_buffer.clear` event to manually cut off the current audio + * response. + * [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc). + */ + export interface OutputAudioBufferCleared { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The unique ID of the response that produced the audio. + */ + response_id: string; + + /** + * The event type, must be `output_audio_buffer.cleared`. + */ + type: 'output_audio_buffer.cleared'; + } +} + +/** + * Returned when the model-generated audio is updated. + */ +export interface ResponseAudioDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * Base64-encoded audio data delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio.delta`. + */ + type: 'response.audio.delta'; +} + +/** + * Returned when the model-generated audio is done. Also emitted when a Response is + * interrupted, incomplete, or cancelled. + */ +export interface ResponseAudioDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio.done`. + */ + type: 'response.audio.done'; +} + +/** + * Returned when the model-generated transcription of audio output is updated. + */ +export interface ResponseAudioTranscriptDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The transcript delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.audio_transcript.delta`. + */ + type: 'response.audio_transcript.delta'; +} + +/** + * Returned when the model-generated transcription of audio output is done + * streaming. Also emitted when a Response is interrupted, incomplete, or + * cancelled. + */ +export interface ResponseAudioTranscriptDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. 
+ */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The final transcript of the audio. + */ + transcript: string; + + /** + * The event type, must be `response.audio_transcript.done`. + */ + type: 'response.audio_transcript.done'; +} + +/** + * Send this event to cancel an in-progress response. The server will respond with + * a `response.cancelled` event or an error if there is no response to cancel. + */ +export interface ResponseCancelEvent { + /** + * The event type, must be `response.cancel`. + */ + type: 'response.cancel'; + + /** + * Optional client-generated ID used to identify this event. + */ + event_id?: string; + + /** + * A specific response ID to cancel - if not provided, will cancel an in-progress + * response in the default conversation. + */ + response_id?: string; +} + +/** + * Returned when a new content part is added to an assistant message item during + * response generation. + */ +export interface ResponseContentPartAddedEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item to which the content part was added. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The content part that was added. + */ + part: ResponseContentPartAddedEvent.Part; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.content_part.added`. + */ + type: 'response.content_part.added'; +} + +export namespace ResponseContentPartAddedEvent { + /** + * The content part that was added. + */ + export interface Part { + /** + * Base64-encoded audio data (if type is "audio"). + */ + audio?: string; + + /** + * The text content (if type is "text"). + */ + text?: string; + + /** + * The transcript of the audio (if type is "audio"). + */ + transcript?: string; + + /** + * The content type ("text", "audio"). + */ + type?: 'text' | 'audio'; + } +} + +/** + * Returned when a content part is done streaming in an assistant message item. + * Also emitted when a Response is interrupted, incomplete, or cancelled. + */ +export interface ResponseContentPartDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The content part that is done. + */ + part: ResponseContentPartDoneEvent.Part; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.content_part.done`. + */ + type: 'response.content_part.done'; +} + +export namespace ResponseContentPartDoneEvent { + /** + * The content part that is done. + */ + export interface Part { + /** + * Base64-encoded audio data (if type is "audio"). + */ + audio?: string; + + /** + * The text content (if type is "text"). + */ + text?: string; + + /** + * The transcript of the audio (if type is "audio"). + */ + transcript?: string; + + /** + * The content type ("text", "audio"). + */ + type?: 'text' | 'audio'; + } +} + +/** + * This event instructs the server to create a Response, which means triggering + * model inference. When in Server VAD mode, the server will create Responses + * automatically. 
+ *
+ * A Response will include at least one Item, and may have two, in which case the
+ * second will be a function call. These Items will be appended to the conversation
+ * history.
+ *
+ * The server will respond with a `response.created` event, events for Items and
+ * content created, and finally a `response.done` event to indicate the Response is
+ * complete.
+ *
+ * The `response.create` event includes inference configuration like
+ * `instructions` and `temperature`. These fields will override the Session's
+ * configuration for this Response only.
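+ *
+ * A minimal sketch of requesting an out-of-band, text-only response (assuming an
+ * established Realtime WebSocket `ws`; the instructions are illustrative):
+ *
+ * @example
+ * ```ts
+ * const create: ResponseCreateEvent = {
+ *   type: 'response.create',
+ *   response: {
+ *     conversation: 'none', // keep items out of the default conversation
+ *     modalities: ['text'],
+ *     instructions: 'Summarize the conversation so far in one sentence.',
+ *   },
+ * };
+ * ws.send(JSON.stringify(create));
+ * ```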
+ */
+export interface ResponseCreateEvent {
+  /**
+   * The event type, must be `response.create`.
+   */
+  type: 'response.create';
+
+  /**
+   * Optional client-generated ID used to identify this event.
+   */
+  event_id?: string;
+
+  /**
+   * Create a new Realtime response with these parameters
+   */
+  response?: ResponseCreateEvent.Response;
+}
+
+export namespace ResponseCreateEvent {
+  /**
+   * Create a new Realtime response with these parameters
+   */
+  export interface Response {
+    /**
+     * Controls which conversation the response is added to. Currently supports `auto`
+     * and `none`, with `auto` as the default value. The `auto` value means that the
+     * contents of the response will be added to the default conversation. Set this to
+     * `none` to create an out-of-band response which will not add items to the default
+     * conversation.
+     */
+    conversation?: (string & {}) | 'auto' | 'none';
+
+    /**
+     * Input items to include in the prompt for the model. Using this field creates a
+     * new context for this Response instead of using the default conversation. An
+     * empty array `[]` will clear the context for this Response. Note that this can
+     * include references to items from the default conversation.
+     */
+    input?: Array<RealtimeAPI.ConversationItemWithReference>;
+
+    /**
+     * The default system instructions (i.e. system message) prepended to model calls.
+     * This field allows the client to guide the model on desired responses. The model
+     * can be instructed on response content and format (e.g. "be extremely succinct",
+     * "act friendly", "here are examples of good responses") and on audio behavior
+     * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+     * instructions are not guaranteed to be followed by the model, but they provide
+     * guidance to the model on the desired behavior.
+     *
+     * Note that the server sets default instructions which will be used if this field
+     * is not set and are visible in the `session.created` event at the start of the
+     * session.
+     */
+    instructions?: string;
+
+    /**
+     * Maximum number of output tokens for a single assistant response, inclusive of
+     * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+     * `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+     */
+    max_response_output_tokens?: number | 'inf';
+
+    /**
+     * Set of 16 key-value pairs that can be attached to an object. This can be useful
+     * for storing additional information about the object in a structured format, and
+     * querying for objects via API or the dashboard.
+     *
+     * Keys are strings with a maximum length of 64 characters. Values are strings with
+     * a maximum length of 512 characters.
+     */
+    metadata?: Shared.Metadata | null;
+
+    /**
+     * The set of modalities the model can respond with. To disable audio, set this to
+     * ["text"].
+     */
+    modalities?: Array<'text' | 'audio'>;
+
+    /**
+     * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+     */
+    output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+    /**
+     * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
+     */
+    temperature?: number;
+
+    /**
+     * How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+     * a function, like `{"type": "function", "function": {"name": "my_function"}}`.
+     */
+    tool_choice?: string;
+
+    /**
+     * Tools (functions) available to the model.
+     */
+    tools?: Array<Response.Tool>;
+
+    /**
+     * The voice the model uses to respond. Voice cannot be changed during the session
+     * once the model has responded with audio at least once. Current voice options are
+     * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+     * `shimmer`, and `verse`.
+     */
+    voice?:
+      | (string & {})
+      | 'alloy'
+      | 'ash'
+      | 'ballad'
+      | 'coral'
+      | 'echo'
+      | 'fable'
+      | 'onyx'
+      | 'nova'
+      | 'sage'
+      | 'shimmer'
+      | 'verse';
+  }
+
+  export namespace Response {
+    export interface Tool {
+      /**
+       * The description of the function, including guidance on when and how to call it,
+       * and guidance about what to tell the user when calling (if anything).
+       */
+      description?: string;
+
+      /**
+       * The name of the function.
+       */
+      name?: string;
+
+      /**
+       * Parameters of the function in JSON Schema.
+       */
+      parameters?: unknown;
+
+      /**
+       * The type of the tool, i.e. `function`.
+       */
+      type?: 'function';
+    }
+  }
+}
+
+/**
+ * Returned when a new Response is created. The first event of response creation,
+ * where the response is in an initial state of `in_progress`.
+ */
+export interface ResponseCreatedEvent {
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The response resource.
+   */
+  response: RealtimeResponse;
+
+  /**
+   * The event type, must be `response.created`.
+   */
+  type: 'response.created';
+}
+
+/**
+ * Returned when a Response is done streaming. Always emitted, no matter the final
+ * state. The Response object included in the `response.done` event will include
+ * all output Items in the Response but will omit the raw audio data.
+ */
+export interface ResponseDoneEvent {
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The response resource.
+   */
+  response: RealtimeResponse;
+
+  /**
+   * The event type, must be `response.done`.
+   */
+  type: 'response.done';
+}
+
+/**
+ * Returned when the model-generated function call arguments are updated.
+ */
+export interface ResponseFunctionCallArgumentsDeltaEvent {
+  /**
+   * The ID of the function call.
+   */
+  call_id: string;
+
+  /**
+   * The arguments delta as a JSON string.
+   */
+  delta: string;
+
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The ID of the function call item.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item in the response.
+   */
+  output_index: number;
+
+  /**
+   * The ID of the response.
+   */
+  response_id: string;
+
+  /**
+   * The event type, must be `response.function_call_arguments.delta`.
+   */
+  type: 'response.function_call_arguments.delta';
+}
+
+/**
+ * Returned when the model-generated function call arguments are done streaming.
+ * Also emitted when a Response is interrupted, incomplete, or cancelled.
+ */
+export interface ResponseFunctionCallArgumentsDoneEvent {
+  /**
+   * The final arguments as a JSON string.
+   */
+  arguments: string;
+
+  /**
+   * The ID of the function call.
+   */
+  call_id: string;
+
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * The ID of the function call item.
+ */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.function_call_arguments.done`. + */ + type: 'response.function_call_arguments.done'; +} + +/** + * Returned when a new Item is created during Response generation. + */ +export interface ResponseOutputItemAddedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The index of the output item in the Response. + */ + output_index: number; + + /** + * The ID of the Response to which the item belongs. + */ + response_id: string; + + /** + * The event type, must be `response.output_item.added`. + */ + type: 'response.output_item.added'; +} + +/** + * Returned when an Item is done streaming. Also emitted when a Response is + * interrupted, incomplete, or cancelled. + */ +export interface ResponseOutputItemDoneEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The item to add to the conversation. + */ + item: ConversationItem; + + /** + * The index of the output item in the Response. + */ + output_index: number; + + /** + * The ID of the Response to which the item belongs. + */ + response_id: string; + + /** + * The event type, must be `response.output_item.done`. + */ + type: 'response.output_item.done'; +} + +/** + * Returned when the text value of a "text" content part is updated. + */ +export interface ResponseTextDeltaEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The text delta. + */ + delta: string; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The event type, must be `response.text.delta`. + */ + type: 'response.text.delta'; +} + +/** + * Returned when the text value of a "text" content part is done streaming. Also + * emitted when a Response is interrupted, incomplete, or cancelled. + */ +export interface ResponseTextDoneEvent { + /** + * The index of the content part in the item's content array. + */ + content_index: number; + + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item in the response. + */ + output_index: number; + + /** + * The ID of the response. + */ + response_id: string; + + /** + * The final text content. + */ + text: string; + + /** + * The event type, must be `response.text.done`. + */ + type: 'response.text.done'; +} + +/** + * Returned when a Session is created. Emitted automatically when a new connection + * is established as the first server event. This event will contain the default + * Session configuration. + */ +export interface SessionCreatedEvent { + /** + * The unique ID of the server event. + */ + event_id: string; + + /** + * Realtime session object configuration. + */ + session: SessionsAPI.Session; + + /** + * The event type, must be `session.created`. + */ + type: 'session.created'; +} + +/** + * Send this event to update the session’s default configuration. The client may + * send this event at any time to update any field, except for `voice`. 
However,
+ * note that once a session has been initialized with a particular `model`, it
+ * can’t be changed to another model using `session.update`.
+ *
+ * When the server receives a `session.update`, it will respond with a
+ * `session.updated` event showing the full, effective configuration. Only the
+ * fields that are present are updated. To clear a field like `instructions`, pass
+ * an empty string.
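+ *
+ * A minimal sketch of adjusting the session mid-call (assuming an established
+ * Realtime WebSocket `ws`; the values shown are illustrative):
+ *
+ * @example
+ * ```ts
+ * const update: SessionUpdateEvent = {
+ *   type: 'session.update',
+ *   session: {
+ *     instructions: 'Be extremely succinct.',
+ *     turn_detection: { type: 'server_vad', silence_duration_ms: 400 },
+ *   },
+ * };
+ * ws.send(JSON.stringify(update));
+ * ```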
+ */
+export interface SessionUpdateEvent {
+  /**
+   * Realtime session object configuration.
+   */
+  session: SessionUpdateEvent.Session;
+
+  /**
+   * The event type, must be `session.update`.
+   */
+  type: 'session.update';
+
+  /**
+   * Optional client-generated ID used to identify this event.
+   */
+  event_id?: string;
+}
+
+export namespace SessionUpdateEvent {
+  /**
+   * Realtime session object configuration.
+   */
+  export interface Session {
+    /**
+     * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+     * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+     * (mono), and little-endian byte order.
+     */
+    input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+    /**
+     * Configuration for input audio noise reduction. This can be set to `null` to turn
+     * off. Noise reduction filters audio added to the input audio buffer before it is
+     * sent to VAD and the model. Filtering the audio can improve VAD and turn
+     * detection accuracy (reducing false positives) and model performance by improving
+     * perception of the input audio.
+     */
+    input_audio_noise_reduction?: Session.InputAudioNoiseReduction;
+
+    /**
+     * Configuration for input audio transcription. Defaults to off and can be set to
+     * `null` to turn off once on. Input audio transcription is not native to the
+     * model, since the model consumes audio directly. Transcription runs
+     * asynchronously through
+     * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+     * and should be treated as guidance of input audio content rather than precisely
+     * what the model heard. The client can optionally set the language and prompt for
+     * transcription; these offer additional guidance to the transcription service.
+     */
+    input_audio_transcription?: Session.InputAudioTranscription;
+
+    /**
+     * The default system instructions (i.e. system message) prepended to model calls.
+     * This field allows the client to guide the model on desired responses. The model
+     * can be instructed on response content and format (e.g. "be extremely succinct",
+     * "act friendly", "here are examples of good responses") and on audio behavior
+     * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+     * instructions are not guaranteed to be followed by the model, but they provide
+     * guidance to the model on the desired behavior.
+     *
+     * Note that the server sets default instructions which will be used if this field
+     * is not set and are visible in the `session.created` event at the start of the
+     * session.
+     */
+    instructions?: string;
+
+    /**
+     * Maximum number of output tokens for a single assistant response, inclusive of
+     * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+     * `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+     */
+    max_response_output_tokens?: number | 'inf';
+
+    /**
+     * The set of modalities the model can respond with. To disable audio, set this to
+     * ["text"].
+     */
+    modalities?: Array<'text' | 'audio'>;
+
+    /**
+     * The Realtime model used for this session.
+     */
+    model?:
+      | 'gpt-4o-realtime-preview'
+      | 'gpt-4o-realtime-preview-2024-10-01'
+      | 'gpt-4o-realtime-preview-2024-12-17'
+      | 'gpt-4o-mini-realtime-preview'
+      | 'gpt-4o-mini-realtime-preview-2024-12-17';
+
+    /**
+     * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+     * For `pcm16`, output audio is sampled at a rate of 24kHz.
+     */
+    output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+    /**
+     * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
+     * temperature of 0.8 is highly recommended for best performance.
+     */
+    temperature?: number;
+
+    /**
+     * How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+     * a function.
+     */
+    tool_choice?: string;
+
+    /**
+     * Tools (functions) available to the model.
+     */
+    tools?: Array<Session.Tool>;
+
+    /**
+     * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+     * set to `null` to turn off, in which case the client must manually trigger model
+     * response. Server VAD means that the model will detect the start and end of
+     * speech based on audio volume and respond at the end of user speech. Semantic VAD
+     * is more advanced and uses a turn detection model (in conjunction with VAD) to
+     * semantically estimate whether the user has finished speaking, then dynamically
+     * sets a timeout based on this probability. For example, if user audio trails off
+     * with "uhhm", the model will score a low probability of turn end and wait longer
+     * for the user to continue speaking. This can be useful for more natural
+     * conversations, but may have a higher latency.
+     */
+    turn_detection?: Session.TurnDetection;
+
+    /**
+     * The voice the model uses to respond. Voice cannot be changed during the session
+     * once the model has responded with audio at least once. Current voice options are
+     * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+     * `shimmer`, and `verse`.
+     */
+    voice?:
+      | (string & {})
+      | 'alloy'
+      | 'ash'
+      | 'ballad'
+      | 'coral'
+      | 'echo'
+      | 'fable'
+      | 'onyx'
+      | 'nova'
+      | 'sage'
+      | 'shimmer'
+      | 'verse';
+  }
+
+  export namespace Session {
+    /**
+     * Configuration for input audio noise reduction. This can be set to `null` to turn
+     * off. Noise reduction filters audio added to the input audio buffer before it is
+     * sent to VAD and the model. Filtering the audio can improve VAD and turn
+     * detection accuracy (reducing false positives) and model performance by improving
+     * perception of the input audio.
+     */
+    export interface InputAudioNoiseReduction {
+      /**
+       * Type of noise reduction. `near_field` is for close-talking microphones such as
+       * headphones, `far_field` is for far-field microphones such as laptop or
+       * conference room microphones.
+       */
+      type?: 'near_field' | 'far_field';
+    }
+
+    /**
+     * Configuration for input audio transcription. Defaults to off and can be set to
+     * `null` to turn off once on. Input audio transcription is not native to the
+     * model, since the model consumes audio directly. Transcription runs
+     * asynchronously through
+     * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+     * and should be treated as guidance of input audio content rather than precisely
+     * what the model heard. The client can optionally set the language and prompt for
+     * transcription; these offer additional guidance to the transcription service.
+     */
+    export interface InputAudioTranscription {
+      /**
+       * The language of the input audio. Supplying the input language in
+       * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+       * format will improve accuracy and latency.
+       */
+      language?: string;
+
+      /**
+       * The model to use for transcription, current options are `gpt-4o-transcribe`,
+       * `gpt-4o-mini-transcribe`, and `whisper-1`.
+       */
+      model?: string;
+
+      /**
+       * An optional text to guide the model's style or continue a previous audio
+       * segment. For `whisper-1`, the
+       * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+       * For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+       * "expect words related to technology".
+       */
+      prompt?: string;
+    }
+
+    export interface Tool {
+      /**
+       * The description of the function, including guidance on when and how to call it,
+       * and guidance about what to tell the user when calling (if anything).
+       */
+      description?: string;
+
+      /**
+       * The name of the function.
+       */
+      name?: string;
+
+      /**
+       * Parameters of the function in JSON Schema.
+       */
+      parameters?: unknown;
+
+      /**
+       * The type of the tool, i.e. `function`.
+       */
+      type?: 'function';
+    }
+
+    /**
+     * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+     * set to `null` to turn off, in which case the client must manually trigger model
+     * response. Server VAD means that the model will detect the start and end of
+     * speech based on audio volume and respond at the end of user speech. Semantic VAD
+     * is more advanced and uses a turn detection model (in conjunction with VAD) to
+     * semantically estimate whether the user has finished speaking, then dynamically
+     * sets a timeout based on this probability. For example, if user audio trails off
+     * with "uhhm", the model will score a low probability of turn end and wait longer
+     * for the user to continue speaking. This can be useful for more natural
+     * conversations, but may have a higher latency.
+     */
+    export interface TurnDetection {
+      /**
+       * Whether or not to automatically generate a response when a VAD stop event
+       * occurs.
+       */
+      create_response?: boolean;
+
+      /**
+       * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+       * will wait longer for the user to continue speaking, `high` will respond more
+       * quickly. `auto` is the default and is equivalent to `medium`.
+       */
+      eagerness?: 'low' | 'medium' | 'high' | 'auto';
+
+      /**
+       * Whether or not to automatically interrupt any ongoing response with output to
+       * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+       * occurs.
+       */
+      interrupt_response?: boolean;
+
+      /**
+       * Used only for `server_vad` mode. Amount of audio to include before the VAD
+       * detected speech (in milliseconds). Defaults to 300ms.
+       */
+      prefix_padding_ms?: number;
+
+      /**
+       * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+       * milliseconds). Defaults to 500ms. With shorter values the model will respond
+       * more quickly, but may jump in on short pauses from the user.
+       */
+      silence_duration_ms?: number;
+
+      /**
+       * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0);
+       * this defaults to 0.5. A higher threshold will require louder audio to activate
+       * the model, and thus might perform better in noisy environments.
+       */
+      threshold?: number;
+
+      /**
+       * Type of turn detection.
+       */
+      type?: 'server_vad' | 'semantic_vad';
+    }
+  }
+}
+
+/**
+ * Returned when a session is updated with a `session.update` event, unless there
+ * is an error.
+ */
+export interface SessionUpdatedEvent {
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * Realtime session object configuration.
+   */
+  session: SessionsAPI.Session;
+
+  /**
+   * The event type, must be `session.updated`.
+   */
+  type: 'session.updated';
+}
+
+/**
+ * Send this event to update a transcription session.
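+ *
+ * A minimal sketch of configuring a transcription session (assuming an
+ * established Realtime transcription WebSocket `ws`; the model and language are
+ * illustrative):
+ *
+ * @example
+ * ```ts
+ * const update: TranscriptionSessionUpdate = {
+ *   type: 'transcription_session.update',
+ *   session: {
+ *     input_audio_transcription: { model: 'gpt-4o-transcribe', language: 'en' },
+ *     turn_detection: { type: 'server_vad' },
+ *   },
+ * };
+ * ws.send(JSON.stringify(update));
+ * ```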
+ */
+export interface TranscriptionSessionUpdate {
+  /**
+   * Realtime transcription session object configuration.
+   */
+  session: TranscriptionSessionUpdate.Session;
+
+  /**
+   * The event type, must be `transcription_session.update`.
+   */
+  type: 'transcription_session.update';
+
+  /**
+   * Optional client-generated ID used to identify this event.
+   */
+  event_id?: string;
+}
+
+export namespace TranscriptionSessionUpdate {
+  /**
+   * Realtime transcription session object configuration.
+   */
+  export interface Session {
+    /**
+     * The set of items to include in the transcription. Current available items are:
+     *
+     * - `item.input_audio_transcription.logprobs`
+     */
+    include?: Array<string>;
+
+    /**
+     * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+     * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+     * (mono), and little-endian byte order.
+     */
+    input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+
+    /**
+     * Configuration for input audio noise reduction. This can be set to `null` to turn
+     * off. Noise reduction filters audio added to the input audio buffer before it is
+     * sent to VAD and the model. Filtering the audio can improve VAD and turn
+     * detection accuracy (reducing false positives) and model performance by improving
+     * perception of the input audio.
+     */
+    input_audio_noise_reduction?: Session.InputAudioNoiseReduction;
+
+    /**
+     * Configuration for input audio transcription. The client can optionally set the
+     * language and prompt for transcription; these offer additional guidance to the
+     * transcription service.
+     */
+    input_audio_transcription?: Session.InputAudioTranscription;
+
+    /**
+     * The set of modalities the model can respond with. To disable audio, set this to
+     * ["text"].
+     */
+    modalities?: Array<'text' | 'audio'>;
+
+    /**
+     * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+     * set to `null` to turn off, in which case the client must manually trigger model
+     * response. Server VAD means that the model will detect the start and end of
+     * speech based on audio volume and respond at the end of user speech. Semantic VAD
+     * is more advanced and uses a turn detection model (in conjunction with VAD) to
+     * semantically estimate whether the user has finished speaking, then dynamically
+     * sets a timeout based on this probability. For example, if user audio trails off
+     * with "uhhm", the model will score a low probability of turn end and wait longer
+     * for the user to continue speaking. This can be useful for more natural
+     * conversations, but may have a higher latency.
+     */
+    turn_detection?: Session.TurnDetection;
+  }
+
+  export namespace Session {
+    /**
+     * Configuration for input audio noise reduction. This can be set to `null` to turn
+     * off. Noise reduction filters audio added to the input audio buffer before it is
+     * sent to VAD and the model. Filtering the audio can improve VAD and turn
+     * detection accuracy (reducing false positives) and model performance by improving
+     * perception of the input audio.
+     */
+    export interface InputAudioNoiseReduction {
+      /**
+       * Type of noise reduction. `near_field` is for close-talking microphones such as
+       * headphones, `far_field` is for far-field microphones such as laptop or
+       * conference room microphones.
+       */
+      type?: 'near_field' | 'far_field';
+    }
+
+    /**
+     * Configuration for input audio transcription. The client can optionally set the
+     * language and prompt for transcription; these offer additional guidance to the
+     * transcription service.
+     */
+    export interface InputAudioTranscription {
+      /**
+       * The language of the input audio. Supplying the input language in
+       * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+       * format will improve accuracy and latency.
+       */
+      language?: string;
+
+      /**
+       * The model to use for transcription, current options are `gpt-4o-transcribe`,
+       * `gpt-4o-mini-transcribe`, and `whisper-1`.
+       */
+      model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+
+      /**
+       * An optional text to guide the model's style or continue a previous audio
+       * segment. For `whisper-1`, the
+       * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+       * For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+       * "expect words related to technology".
+       */
+      prompt?: string;
+    }
+
+    /**
+     * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+     * set to `null` to turn off, in which case the client must manually trigger model
+     * response. Server VAD means that the model will detect the start and end of
+     * speech based on audio volume and respond at the end of user speech. Semantic VAD
+     * is more advanced and uses a turn detection model (in conjunction with VAD) to
+     * semantically estimate whether the user has finished speaking, then dynamically
+     * sets a timeout based on this probability. For example, if user audio trails off
+     * with "uhhm", the model will score a low probability of turn end and wait longer
+     * for the user to continue speaking. This can be useful for more natural
+     * conversations, but may have a higher latency.
+     */
+    export interface TurnDetection {
+      /**
+       * Whether or not to automatically generate a response when a VAD stop event
+       * occurs. Not available for transcription sessions.
+       */
+      create_response?: boolean;
+
+      /**
+       * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+       * will wait longer for the user to continue speaking, `high` will respond more
+       * quickly. `auto` is the default and is equivalent to `medium`.
+       */
+      eagerness?: 'low' | 'medium' | 'high' | 'auto';
+
+      /**
+       * Whether or not to automatically interrupt any ongoing response with output to
+       * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+       * occurs. Not available for transcription sessions.
+       */
+      interrupt_response?: boolean;
+
+      /**
+       * Used only for `server_vad` mode. Amount of audio to include before the VAD
+       * detected speech (in milliseconds). Defaults to 300ms.
+       */
+      prefix_padding_ms?: number;
+
+      /**
+       * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+       * milliseconds). Defaults to 500ms. With shorter values the model will respond
+       * more quickly, but may jump in on short pauses from the user.
+       */
+      silence_duration_ms?: number;
+
+      /**
+       * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0);
+       * this defaults to 0.5. A higher threshold will require louder audio to activate
+       * the model, and thus might perform better in noisy environments.
+       */
+      threshold?: number;
+
+      /**
+       * Type of turn detection.
+       */
+      type?: 'server_vad' | 'semantic_vad';
+    }
+  }
+}
+
+/**
+ * Returned when a transcription session is updated with a
+ * `transcription_session.update` event, unless there is an error.
+ */
+export interface TranscriptionSessionUpdatedEvent {
+  /**
+   * The unique ID of the server event.
+   */
+  event_id: string;
+
+  /**
+   * A new Realtime transcription session configuration.
+   *
+   * When a session is created on the server via REST API, the session object also
+   * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
+   * not present when a session is updated via the WebSocket API.
+   */
+  session: TranscriptionSessionsAPI.TranscriptionSession;
+
+  /**
+   * The event type, must be `transcription_session.updated`.
+   */
+  type: 'transcription_session.updated';
+}
+
+Realtime.Sessions = Sessions;
+Realtime.TranscriptionSessions = TranscriptionSessions;
+
+export declare namespace Realtime {
+  export {
+    type ConversationCreatedEvent as ConversationCreatedEvent,
+    type ConversationItem as ConversationItem,
+    type ConversationItemContent as ConversationItemContent,
+    type ConversationItemCreateEvent as ConversationItemCreateEvent,
+    type ConversationItemCreatedEvent as ConversationItemCreatedEvent,
+    type ConversationItemDeleteEvent as ConversationItemDeleteEvent,
+    type ConversationItemDeletedEvent as ConversationItemDeletedEvent,
+    type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent,
+    type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent,
+    type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent,
+    type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent,
+    type ConversationItemTruncateEvent as ConversationItemTruncateEvent,
+    type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent,
+    type ConversationItemWithReference as ConversationItemWithReference,
+    type ErrorEvent as ErrorEvent,
+    type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent,
+    type InputAudioBufferClearEvent as InputAudioBufferClearEvent,
+    type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent,
+    type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent,
+    type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent,
+    type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent,
+    type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent,
+    type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent,
+    type RealtimeClientEvent as RealtimeClientEvent,
+    type RealtimeResponse as RealtimeResponse,
+    type RealtimeResponseStatus as RealtimeResponseStatus,
+    type RealtimeResponseUsage as RealtimeResponseUsage,
+    type RealtimeServerEvent as RealtimeServerEvent,
+    type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent,
+    type ResponseAudioDoneEvent as ResponseAudioDoneEvent,
+    type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
+    type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent,
+    type ResponseCancelEvent as ResponseCancelEvent,
+    type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent,
ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreateEvent as ResponseCreateEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseDoneEvent as ResponseDoneEvent, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type SessionCreatedEvent as SessionCreatedEvent, + type SessionUpdateEvent as SessionUpdateEvent, + type SessionUpdatedEvent as SessionUpdatedEvent, + type TranscriptionSessionUpdate as TranscriptionSessionUpdate, + type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, + }; + + export { + Sessions as Sessions, + type SessionsAPISession as Session, + type SessionCreateResponse as SessionCreateResponse, + type SessionCreateParams as SessionCreateParams, + }; + + export { + TranscriptionSessions as TranscriptionSessions, + type TranscriptionSession as TranscriptionSession, + type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, + }; +} diff --git a/src/resources/beta/realtime/sessions.ts b/src/resources/beta/realtime/sessions.ts new file mode 100644 index 000000000..a55a2678c --- /dev/null +++ b/src/resources/beta/realtime/sessions.ts @@ -0,0 +1,752 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; + +export class Sessions extends APIResource { + /** + * Create an ephemeral API token for use in client-side applications with the + * Realtime API. Can be configured with the same session parameters as the + * `session.update` client event. + * + * It responds with a session object, plus a `client_secret` key which contains a + * usable ephemeral API token that can be used to authenticate browser clients for + * the Realtime API. + * + * @example + * ```ts + * const session = + * await client.beta.realtime.sessions.create(); + * ``` + */ + create(body: SessionCreateParams, options?: Core.RequestOptions): Core.APIPromise<SessionCreateResponse> { + return this._client.post('/realtime/sessions', { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } +}
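The `create` method above is the server-side half of the ephemeral-token flow: the real API key mints a short-lived `client_secret`, and only that secret is shipped to the browser. A minimal sketch of that pattern, assuming a Node process with `OPENAI_API_KEY` set (the helper name and the chosen session fields are illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Mint a short-lived token that a browser client can use to connect to the
// Realtime API without ever seeing the real API key.
async function mintRealtimeToken(): Promise<string> {
  const session = await client.beta.realtime.sessions.create({
    model: 'gpt-4o-realtime-preview-2024-12-17',
    modalities: ['text', 'audio'],
  });
  // Per the SessionCreateResponse docs below, the key's TTL is one minute,
  // so hand it to the browser immediately.
  return session.client_secret.value;
}
```
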
+ +/** + * Realtime session object configuration. + */ +export interface Session { + /** + * Unique identifier for the session that looks like `sess_1234567890abcdef`. + */ + id?: string; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: Session.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. + */ + input_audio_transcription?: Session.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The Realtime model used for this session. + */ + model?: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * For `pcm16`, output audio is sampled at a rate of 24kHz. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array<Session.Tool>; + + /** + * Configuration for turn detection, either Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjunction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability.
For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: Session.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; +} + +export namespace Session { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. + */ + model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection, either Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response.
Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjunction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. + */ + type?: 'server_vad' | 'semantic_vad'; + } +}
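The `TurnDetection` shape just defined drives both detection modes. A hedged illustration of the two configurations when creating a session (the numeric values are examples, not recommendations):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function createSessions() {
  // Server VAD: volume-based endpointing, tuned with the server_vad-only knobs.
  const serverVad = await client.beta.realtime.sessions.create({
    turn_detection: {
      type: 'server_vad',
      threshold: 0.6, // require louder audio in a noisy room
      prefix_padding_ms: 300, // audio retained from before speech was detected
      silence_duration_ms: 700, // tolerate longer pauses before ending the turn
    },
  });

  // Semantic VAD: model-estimated end of turn; eagerness replaces the timing knobs.
  const semanticVad = await client.beta.realtime.sessions.create({
    turn_detection: { type: 'semantic_vad', eagerness: 'low' },
  });

  return { serverVad, semanticVad };
}
```
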
+ +/** + * A new Realtime session configuration, with an ephemeral key. Default TTL for + * keys is one minute. + */ +export interface SessionCreateResponse { + /** + * Ephemeral key returned by the API. + */ + client_secret: SessionCreateResponse.ClientSecret; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: string; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + input_audio_transcription?: SessionCreateResponse.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + output_audio_format?: string; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array<SessionCreateResponse.Tool>; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: SessionCreateResponse.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; +} + +export namespace SessionCreateResponse { + /** + * Ephemeral key returned by the API. + */ + export interface ClientSecret { + /** + * Timestamp for when the token expires. Currently, all tokens expire after one + * minute. + */ + expires_at: number; + + /** + * Ephemeral key usable in client environments to authenticate connections to the + * Realtime API. Use this in client-side environments rather than a standard API + * token, which should only be used server-side. + */ + value: string; + } + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through Whisper and should be treated as rough guidance rather + * than the representation understood by the model. + */ + export interface InputAudioTranscription { + /** + * The model to use for transcription, `whisper-1` is the only currently supported + * model. + */ + model?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech.
+ */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export interface SessionCreateParams { + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: SessionCreateParams.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service. + */ + input_audio_transcription?: SessionCreateParams.InputAudioTranscription; + + /** + * The default system instructions (i.e. system message) prepended to model calls. + * This field allows the client to guide the model on desired responses. The model + * can be instructed on response content and format, (e.g. "be extremely succinct", + * "act friendly", "here are examples of good responses") and on audio behavior + * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The + * instructions are not guaranteed to be followed by the model, but they provide + * guidance to the model on the desired behavior. + * + * Note that the server sets default instructions which will be used if this field + * is not set and are visible in the `session.created` event at the start of the + * session. + */ + instructions?: string; + + /** + * Maximum number of output tokens for a single assistant response, inclusive of + * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or + * `inf` for the maximum available tokens for a given model. Defaults to `inf`. + */ + max_response_output_tokens?: number | 'inf'; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * The Realtime model used for this session. 
+ */ + model?: + | 'gpt-4o-realtime-preview' + | 'gpt-4o-realtime-preview-2024-10-01' + | 'gpt-4o-realtime-preview-2024-12-17' + | 'gpt-4o-mini-realtime-preview' + | 'gpt-4o-mini-realtime-preview-2024-12-17'; + + /** + * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + * For `pcm16`, output audio is sampled at a rate of 24kHz. + */ + output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a + * temperature of 0.8 is highly recommended for best performance. + */ + temperature?: number; + + /** + * How the model chooses tools. Options are `auto`, `none`, `required`, or specify + * a function. + */ + tool_choice?: string; + + /** + * Tools (functions) available to the model. + */ + tools?: Array<SessionCreateParams.Tool>; + + /** + * Configuration for turn detection, either Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjunction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: SessionCreateParams.TurnDetection; + + /** + * The voice the model uses to respond. Voice cannot be changed during the session + * once the model has responded with audio at least once. Current voice options are + * `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, + * `shimmer`, and `verse`. + */ + voice?: + | (string & {}) + | 'alloy' + | 'ash' + | 'ballad' + | 'coral' + | 'echo' + | 'fable' + | 'onyx' + | 'nova' + | 'sage' + | 'shimmer' + | 'verse'; +} + +export namespace SessionCreateParams { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription, defaults to off and can be set to + * `null` to turn off once on. Input audio transcription is not native to the + * model, since the model consumes audio directly. Transcription runs + * asynchronously through + * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) + * and should be treated as guidance of input audio content rather than precisely + * what the model heard. The client can optionally set the language and prompt for + * transcription, these offer additional guidance to the transcription service.
+ */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. + */ + model?: string; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + export interface Tool { + /** + * The description of the function, including guidance on when and how to call it, + * and guidance about what to tell the user when calling (if anything). + */ + description?: string; + + /** + * The name of the function. + */ + name?: string; + + /** + * Parameters of the function in JSON Schema. + */ + parameters?: unknown; + + /** + * The type of the tool, i.e. `function`. + */ + type?: 'function'; + }
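Since `Tool` here is a flat function definition with JSON Schema parameters, attaching one when creating a session looks like the following sketch (the `get_weather` tool and its schema are invented for illustration):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function createSessionWithTool() {
  return client.beta.realtime.sessions.create({
    tool_choice: 'auto',
    tools: [
      {
        type: 'function',
        name: 'get_weather', // hypothetical tool
        description: 'Look up the current weather for a city; call when the user asks about weather.',
        parameters: {
          type: 'object',
          properties: { city: { type: 'string' } },
          required: ['city'],
        },
      },
    ],
  });
}
```
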
+ + /** + * Configuration for turn detection, either Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjunction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly. `auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. + */ + type?: 'server_vad' | 'semantic_vad'; + } +} + +export declare namespace Sessions { + export { + type Session as Session, + type SessionCreateResponse as SessionCreateResponse, + type SessionCreateParams as SessionCreateParams, + }; +} diff --git a/src/resources/beta/realtime/transcription-sessions.ts b/src/resources/beta/realtime/transcription-sessions.ts new file mode 100644 index 000000000..83e8c47ad --- /dev/null +++ b/src/resources/beta/realtime/transcription-sessions.ts @@ -0,0 +1,314 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; + +export class TranscriptionSessions extends APIResource { + /** + * Create an ephemeral API token for use in client-side applications with the + * Realtime API specifically for realtime transcriptions. Can be configured with + * the same session parameters as the `transcription_session.update` client event. + * + * It responds with a session object, plus a `client_secret` key which contains a + * usable ephemeral API token that can be used to authenticate browser clients for + * the Realtime API. + * + * @example + * ```ts + * const transcriptionSession = + * await client.beta.realtime.transcriptionSessions.create(); + * ``` + */ + create( + body: TranscriptionSessionCreateParams, + options?: Core.RequestOptions, + ): Core.APIPromise<TranscriptionSession> { + return this._client.post('/realtime/transcription_sessions', { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } +}
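Transcription sessions follow the same mint-and-hand-off pattern as regular Realtime sessions. A short sketch, assuming a server-side Node process (the parameter choices are illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function mintTranscriptionToken(): Promise<string> {
  const transcriptionSession = await client.beta.realtime.transcriptionSessions.create({
    input_audio_format: 'pcm16',
    input_audio_transcription: {
      model: 'gpt-4o-transcribe',
      language: 'en', // ISO-639-1; improves accuracy and latency per the docs below
    },
  });
  // The ephemeral client_secret is only present when the session is created
  // via REST, as the TranscriptionSession docs below note.
  return transcriptionSession.client_secret.value;
}
```
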
+ +/** + * A new Realtime transcription session configuration. + * + * When a session is created on the server via REST API, the session object also + * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is + * not present when a session is updated via the WebSocket API. + */ +export interface TranscriptionSession { + /** + * Ephemeral key returned by the API. Only present when the session is created on + * the server via REST API. + */ + client_secret: TranscriptionSession.ClientSecret; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + */ + input_audio_format?: string; + + /** + * Configuration of the transcription model. + */ + input_audio_transcription?: TranscriptionSession.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + turn_detection?: TranscriptionSession.TurnDetection; +} + +export namespace TranscriptionSession { + /** + * Ephemeral key returned by the API. Only present when the session is created on + * the server via REST API. + */ + export interface ClientSecret { + /** + * Timestamp for when the token expires. Currently, all tokens expire after one + * minute. + */ + expires_at: number; + + /** + * Ephemeral key usable in client environments to authenticate connections to the + * Realtime API. Use this in client-side environments rather than a standard API + * token, which should only be used server-side. + */ + value: string; + } + + /** + * Configuration of the transcription model. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription. Can be `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, or `whisper-1`. + */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. The + * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + * should match the audio language. + */ + prompt?: string; + } + + /** + * Configuration for turn detection. Can be set to `null` to turn off. Server VAD + * means that the model will detect the start and end of speech based on audio + * volume and respond at the end of user speech. + */ + export interface TurnDetection { + /** + * Amount of audio to include before the VAD detected speech (in milliseconds). + * Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. + * With shorter values the model will respond more quickly, but may jump in on + * short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher + * threshold will require louder audio to activate the model, and thus might + * perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection, only `server_vad` is currently supported. + */ + type?: string; + } +} + +export interface TranscriptionSessionCreateParams { + /** + * The set of items to include in the transcription. Current available items are: + * + * - `item.input_audio_transcription.logprobs` + */ + include?: Array<string>; + + /** + * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + * (mono), and little-endian byte order. + */ + input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw'; + + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction; + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription; + + /** + * The set of modalities the model can respond with. To disable audio, set this to + * ["text"]. + */ + modalities?: Array<'text' | 'audio'>; + + /** + * Configuration for turn detection, either Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech.
Semantic VAD + * is more advanced and uses a turn detection model (in conjunction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + turn_detection?: TranscriptionSessionCreateParams.TurnDetection; +} + +export namespace TranscriptionSessionCreateParams { + /** + * Configuration for input audio noise reduction. This can be set to `null` to turn + * off. Noise reduction filters audio added to the input audio buffer before it is + * sent to VAD and the model. Filtering the audio can improve VAD and turn + * detection accuracy (reducing false positives) and model performance by improving + * perception of the input audio. + */ + export interface InputAudioNoiseReduction { + /** + * Type of noise reduction. `near_field` is for close-talking microphones such as + * headphones, `far_field` is for far-field microphones such as laptop or + * conference room microphones. + */ + type?: 'near_field' | 'far_field'; + } + + /** + * Configuration for input audio transcription. The client can optionally set the + * language and prompt for transcription, these offer additional guidance to the + * transcription service. + */ + export interface InputAudioTranscription { + /** + * The language of the input audio. Supplying the input language in + * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + * format will improve accuracy and latency. + */ + language?: string; + + /** + * The model to use for transcription, current options are `gpt-4o-transcribe`, + * `gpt-4o-mini-transcribe`, and `whisper-1`. + */ + model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1'; + + /** + * An optional text to guide the model's style or continue a previous audio + * segment. For `whisper-1`, the + * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting). + * For `gpt-4o-transcribe` models, the prompt is a free text string, for example + * "expect words related to technology". + */ + prompt?: string; + } + + /** + * Configuration for turn detection, either Server VAD or Semantic VAD. This can be + * set to `null` to turn off, in which case the client must manually trigger model + * response. Server VAD means that the model will detect the start and end of + * speech based on audio volume and respond at the end of user speech. Semantic VAD + * is more advanced and uses a turn detection model (in conjunction with VAD) to + * semantically estimate whether the user has finished speaking, then dynamically + * sets a timeout based on this probability. For example, if user audio trails off + * with "uhhm", the model will score a low probability of turn end and wait longer + * for the user to continue speaking. This can be useful for more natural + * conversations, but may have a higher latency. + */ + export interface TurnDetection { + /** + * Whether or not to automatically generate a response when a VAD stop event + * occurs. Not available for transcription sessions. + */ + create_response?: boolean; + + /** + * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` + * will wait longer for the user to continue speaking, `high` will respond more + * quickly.
`auto` is the default and is equivalent to `medium`. + */ + eagerness?: 'low' | 'medium' | 'high' | 'auto'; + + /** + * Whether or not to automatically interrupt any ongoing response with output to + * the default conversation (i.e. `conversation` of `auto`) when a VAD start event + * occurs. Not available for transcription sessions. + */ + interrupt_response?: boolean; + + /** + * Used only for `server_vad` mode. Amount of audio to include before the VAD + * detected speech (in milliseconds). Defaults to 300ms. + */ + prefix_padding_ms?: number; + + /** + * Used only for `server_vad` mode. Duration of silence to detect speech stop (in + * milliseconds). Defaults to 500ms. With shorter values the model will respond + * more quickly, but may jump in on short pauses from the user. + */ + silence_duration_ms?: number; + + /** + * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this + * defaults to 0.5. A higher threshold will require louder audio to activate the + * model, and thus might perform better in noisy environments. + */ + threshold?: number; + + /** + * Type of turn detection. + */ + type?: 'server_vad' | 'semantic_vad'; + } +} + +export declare namespace TranscriptionSessions { + export { + type TranscriptionSession as TranscriptionSession, + type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, + }; +} diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts index 1964cffb8..f67a1edde 100644 --- a/src/resources/beta/threads/index.ts +++ b/src/resources/beta/threads/index.ts @@ -1,73 +1,73 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - Annotation, - AnnotationDelta, - FileCitationAnnotation, - FileCitationDeltaAnnotation, - FilePathAnnotation, - FilePathDeltaAnnotation, - ImageFile, - ImageFileContentBlock, - ImageFileDelta, - ImageFileDeltaBlock, - ImageURL, - ImageURLContentBlock, - ImageURLDelta, - ImageURLDeltaBlock, - Message, - MessageContent, - MessageContentDelta, - MessageContentPartParam, - MessageDeleted, - MessageDelta, - MessageDeltaEvent, - RefusalContentBlock, - RefusalDeltaBlock, - Text, - TextContentBlock, - TextContentBlockParam, - TextDelta, - TextDeltaBlock, - MessageCreateParams, - MessageUpdateParams, - MessageListParams, MessagesPage, Messages, + type Annotation, + type AnnotationDelta, + type FileCitationAnnotation, + type FileCitationDeltaAnnotation, + type FilePathAnnotation, + type FilePathDeltaAnnotation, + type ImageFile, + type ImageFileContentBlock, + type ImageFileDelta, + type ImageFileDeltaBlock, + type ImageURL, + type ImageURLContentBlock, + type ImageURLDelta, + type ImageURLDeltaBlock, + type Message, + type MessageContent, + type MessageContentDelta, + type MessageContentPartParam, + type MessageDeleted, + type MessageDelta, + type MessageDeltaEvent, + type RefusalContentBlock, + type RefusalDeltaBlock, + type Text, + type TextContentBlock, + type TextContentBlockParam, + type TextDelta, + type TextDeltaBlock, + type MessageCreateParams, + type MessageUpdateParams, + type MessageListParams, } from './messages'; export { - AssistantResponseFormatOption, - AssistantToolChoice, - AssistantToolChoiceFunction, - AssistantToolChoiceOption, - Thread, - ThreadDeleted, - ThreadCreateParams, - ThreadUpdateParams, - ThreadCreateAndRunParams, - ThreadCreateAndRunParamsNonStreaming, - ThreadCreateAndRunParamsStreaming, - ThreadCreateAndRunPollParams, - ThreadCreateAndRunStreamParams, - Threads, -} from './threads'; -export { 
- RequiredActionFunctionToolCall, - Run, - RunStatus, - RunCreateParams, - RunCreateParamsNonStreaming, - RunCreateParamsStreaming, - RunUpdateParams, - RunListParams, - RunCreateAndPollParams, - RunCreateAndStreamParams, - RunStreamParams, - RunSubmitToolOutputsParams, - RunSubmitToolOutputsParamsNonStreaming, - RunSubmitToolOutputsParamsStreaming, - RunSubmitToolOutputsAndPollParams, - RunSubmitToolOutputsStreamParams, RunsPage, Runs, + type RequiredActionFunctionToolCall, + type Run, + type RunStatus, + type RunCreateParams, + type RunCreateParamsNonStreaming, + type RunCreateParamsStreaming, + type RunUpdateParams, + type RunListParams, + type RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, } from './runs/index'; +export { + Threads, + type AssistantResponseFormatOption, + type AssistantToolChoice, + type AssistantToolChoiceFunction, + type AssistantToolChoiceOption, + type Thread, + type ThreadDeleted, + type ThreadCreateParams, + type ThreadUpdateParams, + type ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, +} from './threads'; diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts index 59c92675b..c3834ebe6 100644 --- a/src/resources/beta/threads/messages.ts +++ b/src/resources/beta/threads/messages.ts @@ -3,13 +3,21 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as MessagesAPI from './messages'; +import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Messages extends APIResource { /** * Create a message. + * + * @example + * ```ts + * const message = await client.beta.threads.messages.create( + * 'thread_id', + * { content: 'string', role: 'user' }, + * ); + * ``` */ create( threadId: string, @@ -25,6 +33,14 @@ export class Messages extends APIResource { /** * Retrieve a message. + * + * @example + * ```ts + * const message = await client.beta.threads.messages.retrieve( + * 'thread_id', + * 'message_id', + * ); + * ``` */ retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise<Message> { return this._client.get(`/threads/${threadId}/messages/${messageId}`, { @@ -35,6 +51,14 @@ export class Messages extends APIResource { /** * Modifies a message. + * + * @example + * ```ts + * const message = await client.beta.threads.messages.update( + * 'thread_id', + * 'message_id', + * ); + * ``` */ update( threadId: string, @@ -51,6 +75,16 @@ export class Messages extends APIResource { /** * Returns a list of messages for a given thread. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const message of client.beta.threads.messages.list( + * 'thread_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string,
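The `@example` blocks added in these hunks compose naturally; a hedged end-to-end sketch of the message helpers, using the `Shared.Metadata` typing introduced below (IDs and metadata keys are placeholders):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function annotateThread(threadId: string) {
  const message = await client.beta.threads.messages.create(threadId, {
    role: 'user',
    content: 'How do I cancel my subscription?',
  });

  // metadata is now typed as Shared.Metadata (string keys and values) rather than unknown.
  await client.beta.threads.messages.update(threadId, message.id, {
    metadata: { source: 'billing-faq' },
  });

  // Auto-pagination, as in the list example above.
  for await (const m of client.beta.threads.messages.list(threadId)) {
    console.log(m.id, m.role);
  }
}
```
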
@@ -75,6 +109,15 @@ export class Messages extends APIResource { /** * Deletes a message. + * + * @example + * ```ts + * const messageDeleted = + * await client.beta.threads.messages.del( + * 'thread_id', + * 'message_id', + * ); + * ``` */ del(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise<MessageDeleted> { return this._client.delete(`/threads/${threadId}/messages/${messageId}`, { @@ -408,11 +451,13 @@ export interface Message { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread.message`. @@ -661,11 +706,13 @@ export interface MessageCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace MessageCreateParams { @@ -694,19 +741,21 @@ export interface MessageUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export interface MessageListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list.
*/ before?: string; @@ -722,37 +771,41 @@ export interface MessageListParams extends CursorPageParams { run_id?: string; } -export namespace Messages { - export import Annotation = MessagesAPI.Annotation; - export import AnnotationDelta = MessagesAPI.AnnotationDelta; - export import FileCitationAnnotation = MessagesAPI.FileCitationAnnotation; - export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation; - export import FilePathAnnotation = MessagesAPI.FilePathAnnotation; - export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation; - export import ImageFile = MessagesAPI.ImageFile; - export import ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; - export import ImageFileDelta = MessagesAPI.ImageFileDelta; - export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; - export import ImageURL = MessagesAPI.ImageURL; - export import ImageURLContentBlock = MessagesAPI.ImageURLContentBlock; - export import ImageURLDelta = MessagesAPI.ImageURLDelta; - export import ImageURLDeltaBlock = MessagesAPI.ImageURLDeltaBlock; - export import Message = MessagesAPI.Message; - export import MessageContent = MessagesAPI.MessageContent; - export import MessageContentDelta = MessagesAPI.MessageContentDelta; - export import MessageContentPartParam = MessagesAPI.MessageContentPartParam; - export import MessageDeleted = MessagesAPI.MessageDeleted; - export import MessageDelta = MessagesAPI.MessageDelta; - export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; - export import RefusalContentBlock = MessagesAPI.RefusalContentBlock; - export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock; - export import Text = MessagesAPI.Text; - export import TextContentBlock = MessagesAPI.TextContentBlock; - export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; - export import TextDelta = MessagesAPI.TextDelta; - export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; - export import MessagesPage = MessagesAPI.MessagesPage; - export import MessageCreateParams = MessagesAPI.MessageCreateParams; - export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; - export import MessageListParams = MessagesAPI.MessageListParams; +Messages.MessagesPage = MessagesPage; + +export declare namespace Messages { + export { + type Annotation as Annotation, + type AnnotationDelta as AnnotationDelta, + type FileCitationAnnotation as FileCitationAnnotation, + type FileCitationDeltaAnnotation as FileCitationDeltaAnnotation, + type FilePathAnnotation as FilePathAnnotation, + type FilePathDeltaAnnotation as FilePathDeltaAnnotation, + type ImageFile as ImageFile, + type ImageFileContentBlock as ImageFileContentBlock, + type ImageFileDelta as ImageFileDelta, + type ImageFileDeltaBlock as ImageFileDeltaBlock, + type ImageURL as ImageURL, + type ImageURLContentBlock as ImageURLContentBlock, + type ImageURLDelta as ImageURLDelta, + type ImageURLDeltaBlock as ImageURLDeltaBlock, + type Message as Message, + type MessageContent as MessageContent, + type MessageContentDelta as MessageContentDelta, + type MessageContentPartParam as MessageContentPartParam, + type MessageDeleted as MessageDeleted, + type MessageDelta as MessageDelta, + type MessageDeltaEvent as MessageDeltaEvent, + type RefusalContentBlock as RefusalContentBlock, + type RefusalDeltaBlock as RefusalDeltaBlock, + type Text as Text, + type TextContentBlock as TextContentBlock, + type TextContentBlockParam as TextContentBlockParam, + type TextDelta as TextDelta, + type TextDeltaBlock as 
TextDeltaBlock, + MessagesPage as MessagesPage, + type MessageCreateParams as MessageCreateParams, + type MessageUpdateParams as MessageUpdateParams, + type MessageListParams as MessageListParams, + }; } diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts index 9496f59e1..9dbe575bc 100644 --- a/src/resources/beta/threads/runs/index.ts +++ b/src/resources/beta/threads/runs/index.ts @@ -1,46 +1,46 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. export { - CodeInterpreterLogs, - CodeInterpreterOutputImage, - CodeInterpreterToolCall, - CodeInterpreterToolCallDelta, - FileSearchToolCall, - FileSearchToolCallDelta, - FunctionToolCall, - FunctionToolCallDelta, - MessageCreationStepDetails, - RunStep, - RunStepDelta, - RunStepDeltaEvent, - RunStepDeltaMessageDelta, - RunStepInclude, - ToolCall, - ToolCallDelta, - ToolCallDeltaObject, - ToolCallsStepDetails, - StepRetrieveParams, - StepListParams, RunStepsPage, Steps, + type CodeInterpreterLogs, + type CodeInterpreterOutputImage, + type CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta, + type FileSearchToolCall, + type FileSearchToolCallDelta, + type FunctionToolCall, + type FunctionToolCallDelta, + type MessageCreationStepDetails, + type RunStep, + type RunStepDelta, + type RunStepDeltaEvent, + type RunStepDeltaMessageDelta, + type RunStepInclude, + type ToolCall, + type ToolCallDelta, + type ToolCallDeltaObject, + type ToolCallsStepDetails, + type StepRetrieveParams, + type StepListParams, } from './steps'; export { - RequiredActionFunctionToolCall, - Run, - RunStatus, - RunCreateParams, - RunCreateParamsNonStreaming, - RunCreateParamsStreaming, - RunUpdateParams, - RunListParams, - RunCreateAndPollParams, - RunCreateAndStreamParams, - RunStreamParams, - RunSubmitToolOutputsParams, - RunSubmitToolOutputsParamsNonStreaming, - RunSubmitToolOutputsParamsStreaming, - RunSubmitToolOutputsAndPollParams, - RunSubmitToolOutputsStreamParams, RunsPage, Runs, + type RequiredActionFunctionToolCall, + type Run, + type RunStatus, + type RunCreateParams, + type RunCreateParamsNonStreaming, + type RunCreateParamsStreaming, + type RunUpdateParams, + type RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, } from './runs'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index b48edd5b1..25356df3c 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -8,11 +8,35 @@ import { AssistantStream, RunCreateParamsBaseStream } from '../../../../lib/Assi import { sleep } from '../../../../core'; import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStream'; import * as RunsAPI from './runs'; +import * as Shared from '../../../shared'; import * as AssistantsAPI from '../../assistants'; -import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; import * as ThreadsAPI from '../threads'; import * as StepsAPI from './steps'; +import { + CodeInterpreterLogs, + CodeInterpreterOutputImage, + CodeInterpreterToolCall, + CodeInterpreterToolCallDelta, + FileSearchToolCall, + FileSearchToolCallDelta, + FunctionToolCall, + FunctionToolCallDelta, + MessageCreationStepDetails, + RunStep, + 
RunStepDelta, + RunStepDeltaEvent, + RunStepDeltaMessageDelta, + RunStepInclude, + RunStepsPage, + StepListParams, + StepRetrieveParams, + Steps, + ToolCall, + ToolCallDelta, + ToolCallDeltaObject, + ToolCallsStepDetails, +} from './steps'; import { CursorPage, type CursorPageParams } from '../../../../pagination'; import { Stream } from '../../../../streaming'; @@ -21,6 +45,14 @@ export class Runs extends APIResource { /** * Create a run. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.create( + * 'thread_id', + * { assistant_id: 'assistant_id' }, + * ); + * ``` */ create( threadId: string, @@ -54,6 +86,14 @@ export class Runs extends APIResource { /** * Retrieves a run. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.retrieve( + * 'thread_id', + * 'run_id', + * ); + * ``` */ retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> { return this._client.get(`/threads/${threadId}/runs/${runId}`, { @@ -64,6 +104,14 @@ export class Runs extends APIResource { /** * Modifies a run. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.update( + * 'thread_id', + * 'run_id', + * ); + * ``` */ update( threadId: string, @@ -80,6 +128,16 @@ export class Runs extends APIResource { /** * Returns a list of runs belonging to a thread. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const run of client.beta.threads.runs.list( + * 'thread_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string, @@ -104,6 +162,14 @@ export class Runs extends APIResource { /** * Cancels a run that is `in_progress`. + * + * @example + * ```ts + * const run = await client.beta.threads.runs.cancel( + * 'thread_id', + * 'run_id', + * ); + * ``` */ cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> { return this._client.post(`/threads/${threadId}/runs/${runId}/cancel`, { @@ -205,6 +271,16 @@ * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the * tool calls once they're all completed. All outputs must be submitted in a single * request. + * + * @example + * ```ts + * const run = + * await client.beta.threads.runs.submitToolOutputs( + * 'thread_id', + * 'run_id', + * { tool_outputs: [{}] }, + * ); + * ``` */ submitToolOutputs( threadId: string, @@ -391,11 +467,13 @@ export interface Run { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The model that the @@ -411,7 +489,7 @@ /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls: boolean;
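For the `submitToolOutputs` flow documented above, the polling helpers referenced in this file (`createAndPoll`, `submitToolOutputsAndPoll`) combine as in this sketch; the tool execution itself is schematic:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function runWithTools(threadId: string, assistantId: string) {
  let run = await client.beta.threads.runs.createAndPoll(threadId, {
    assistant_id: assistantId,
  });

  if (run.status === 'requires_action' && run.required_action?.type === 'submit_tool_outputs') {
    const tool_outputs = run.required_action.submit_tool_outputs.tool_calls.map((call) => ({
      tool_call_id: call.id,
      // A real implementation would dispatch on call.function.name here.
      output: JSON.stringify({ ok: true }),
    }));
    run = await client.beta.threads.runs.submitToolOutputsAndPoll(threadId, run.id, {
      tool_outputs,
    });
  }
  return run;
}
```
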
Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -636,7 +714,7 @@ export interface RunCreateParamsBase { * search result content. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ include?: Array<StepsAPI.RunStepInclude>; @@ -681,10 +759,12 @@ export interface RunCreateParamsBase { /** * Body param: Set of 16 key-value pairs that can be attached to an object. This * can be useful for storing additional information about the object in a - * structured format. Keys can be a maximum of 64 characters long and values can be - * a maxium of 512 characters long. + * structured format, and querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * Body param: The ID of the @@ -693,19 +773,29 @@ * associated with the assistant. If not, the model associated with the assistant * will be used. */ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Body param: Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; + /** + * Body param: **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + /** * Body param: Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -799,11 +889,13 @@ export namespace RunCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters.
Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace AdditionalMessage { @@ -874,19 +966,21 @@ export interface RunCreateParamsStreaming extends RunCreateParamsBase { export interface RunUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export interface RunListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; @@ -1619,44 +1713,53 @@ export namespace RunSubmitToolOutputsStreamParams { } } -export namespace Runs { - export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; - export import Run = RunsAPI.Run; - export import RunStatus = RunsAPI.RunStatus; - export import RunsPage = RunsAPI.RunsPage; - export import RunCreateParams = RunsAPI.RunCreateParams; - export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; - export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; - export import RunUpdateParams = RunsAPI.RunUpdateParams; - export import RunListParams = RunsAPI.RunListParams; - export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams; - export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; - export import RunStreamParams = RunsAPI.RunStreamParams; - export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; - export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; - export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; - export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams; - export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; - export import Steps = StepsAPI.Steps; - export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; - export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; - export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; - export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; - export import FileSearchToolCall = StepsAPI.FileSearchToolCall; - export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta; - export import FunctionToolCall = StepsAPI.FunctionToolCall; - export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; - export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; - export import RunStep = StepsAPI.RunStep; - export import RunStepDelta = StepsAPI.RunStepDelta; - 
export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; - export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; - export import RunStepInclude = StepsAPI.RunStepInclude; - export import ToolCall = StepsAPI.ToolCall; - export import ToolCallDelta = StepsAPI.ToolCallDelta; - export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; - export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; - export import RunStepsPage = StepsAPI.RunStepsPage; - export import StepRetrieveParams = StepsAPI.StepRetrieveParams; - export import StepListParams = StepsAPI.StepListParams; +Runs.RunsPage = RunsPage; +Runs.Steps = Steps; +Runs.RunStepsPage = RunStepsPage; + +export declare namespace Runs { + export { + type RequiredActionFunctionToolCall as RequiredActionFunctionToolCall, + type Run as Run, + type RunStatus as RunStatus, + RunsPage as RunsPage, + type RunCreateParams as RunCreateParams, + type RunCreateParamsNonStreaming as RunCreateParamsNonStreaming, + type RunCreateParamsStreaming as RunCreateParamsStreaming, + type RunUpdateParams as RunUpdateParams, + type RunListParams as RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams as RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming as RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming as RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, + }; + + export { + Steps as Steps, + type CodeInterpreterLogs as CodeInterpreterLogs, + type CodeInterpreterOutputImage as CodeInterpreterOutputImage, + type CodeInterpreterToolCall as CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta, + type FileSearchToolCall as FileSearchToolCall, + type FileSearchToolCallDelta as FileSearchToolCallDelta, + type FunctionToolCall as FunctionToolCall, + type FunctionToolCallDelta as FunctionToolCallDelta, + type MessageCreationStepDetails as MessageCreationStepDetails, + type RunStep as RunStep, + type RunStepDelta as RunStepDelta, + type RunStepDeltaEvent as RunStepDeltaEvent, + type RunStepDeltaMessageDelta as RunStepDeltaMessageDelta, + type RunStepInclude as RunStepInclude, + type ToolCall as ToolCall, + type ToolCallDelta as ToolCallDelta, + type ToolCallDeltaObject as ToolCallDeltaObject, + type ToolCallsStepDetails as ToolCallsStepDetails, + RunStepsPage as RunStepsPage, + type StepRetrieveParams as StepRetrieveParams, + type StepListParams as StepListParams, + }; } diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts index c076191a3..abd8d40ed 100644 --- a/src/resources/beta/threads/runs/steps.ts +++ b/src/resources/beta/threads/runs/steps.ts @@ -4,11 +4,22 @@ import { APIResource } from '../../../../resource'; import { isRequestOptions } from '../../../../core'; import * as Core from '../../../../core'; import * as StepsAPI from './steps'; +import * as Shared from '../../../shared'; import { CursorPage, type CursorPageParams } from '../../../../pagination'; export class Steps extends APIResource { /** * Retrieves a run step. + * + * @example + * ```ts + * const runStep = + * await client.beta.threads.runs.steps.retrieve( + * 'thread_id', + * 'run_id', + * 'step_id', + * ); + * ``` */ retrieve( threadId: string, @@ -42,6 +53,17 @@ export class Steps extends APIResource { /** * Returns a list of run steps belonging to a run. 
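The `@example` added just below shows `for await` auto-pagination over `RunStepsPage`. As a rough sketch of page-level iteration instead, assuming the `data`, `hasNextPage()` and `getNextPage()` helpers on the SDK's `CursorPage` (they are not shown in this diff):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function collectRunSteps(threadId: string, runId: string) {
  // First page; `limit` comes from CursorPageParams.
  let page = await client.beta.threads.runs.steps.list(threadId, runId, { limit: 20 });
  const steps = [...page.data];
  // Walk the cursor until the API reports no further pages.
  while (page.hasNextPage()) {
    page = await page.getNextPage();
    steps.push(...page.data);
  }
  return steps;
}
```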
+ * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const runStep of client.beta.threads.runs.steps.list( + * 'thread_id', + * 'run_id', + * )) { + * // ... + * } + * ``` */ list( threadId: string, @@ -515,11 +537,13 @@ export interface RunStep { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread.run.step`. @@ -705,7 +729,7 @@ export interface StepRetrieveParams { * to fetch the file search result content. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ include?: Array<RunStepInclude>; @@ -715,8 +739,8 @@ export interface StepListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; @@ -726,7 +750,7 @@ export interface StepListParams extends CursorPageParams { * to fetch the file search result content. * * See the - * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings) + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information.
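A hedged sketch of the `include` parameter in use; the include literal is assumed from the linked file search documentation rather than shown in this diff:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Ask the API to inline file search result content on the retrieved step.
const step = await client.beta.threads.runs.steps.retrieve(
  'thread_id',
  'run_id',
  'step_id',
  { include: ['step_details.tool_calls[*].file_search.results[*].content'] },
);
console.log(step.step_details);
```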
*/ include?: Array<RunStepInclude>; @@ -738,26 +762,30 @@ export interface StepListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Steps { - export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs; - export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage; - export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall; - export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta; - export import FileSearchToolCall = StepsAPI.FileSearchToolCall; - export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta; - export import FunctionToolCall = StepsAPI.FunctionToolCall; - export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta; - export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails; - export import RunStep = StepsAPI.RunStep; - export import RunStepDelta = StepsAPI.RunStepDelta; - export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent; - export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta; - export import RunStepInclude = StepsAPI.RunStepInclude; - export import ToolCall = StepsAPI.ToolCall; - export import ToolCallDelta = StepsAPI.ToolCallDelta; - export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject; - export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails; - export import RunStepsPage = StepsAPI.RunStepsPage; - export import StepRetrieveParams = StepsAPI.StepRetrieveParams; - export import StepListParams = StepsAPI.StepListParams; +Steps.RunStepsPage = RunStepsPage; + +export declare namespace Steps { + export { + type CodeInterpreterLogs as CodeInterpreterLogs, + type CodeInterpreterOutputImage as CodeInterpreterOutputImage, + type CodeInterpreterToolCall as CodeInterpreterToolCall, + type CodeInterpreterToolCallDelta as CodeInterpreterToolCallDelta, + type FileSearchToolCall as FileSearchToolCall, + type FileSearchToolCallDelta as FileSearchToolCallDelta, + type FunctionToolCall as FunctionToolCall, + type FunctionToolCallDelta as FunctionToolCallDelta, + type MessageCreationStepDetails as MessageCreationStepDetails, + type RunStep as RunStep, + type RunStepDelta as RunStepDelta, + type RunStepDeltaEvent as RunStepDeltaEvent, + type RunStepDeltaMessageDelta as RunStepDeltaMessageDelta, + type RunStepInclude as RunStepInclude, + type ToolCall as ToolCall, + type ToolCallDelta as ToolCallDelta, + type ToolCallDeltaObject as ToolCallDeltaObject, + type ToolCallsStepDetails as ToolCallsStepDetails, + RunStepsPage as RunStepsPage, + type StepRetrieveParams as StepRetrieveParams, + type StepListParams as StepListParams, + }; } diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index be959eb30..c0c6bc8e4 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -8,10 +8,63 @@ import * as Core from '../../../core'; import * as ThreadsAPI from './threads'; import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; -import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; -import * as VectorStoresAPI from '../vector-stores/vector-stores'; +import { + Annotation, + AnnotationDelta, + FileCitationAnnotation, + FileCitationDeltaAnnotation, + FilePathAnnotation, + FilePathDeltaAnnotation, + ImageFile, + ImageFileContentBlock, + ImageFileDelta, + ImageFileDeltaBlock, + ImageURL, + ImageURLContentBlock, + ImageURLDelta, + ImageURLDeltaBlock, + Message as MessagesAPIMessage, +
MessageContent, + MessageContentDelta, + MessageContentPartParam, + MessageCreateParams, + MessageDeleted, + MessageDelta, + MessageDeltaEvent, + MessageListParams, + MessageUpdateParams, + Messages, + MessagesPage, + RefusalContentBlock, + RefusalDeltaBlock, + Text, + TextContentBlock, + TextContentBlockParam, + TextDelta, + TextDeltaBlock, +} from './messages'; import * as RunsAPI from './runs/runs'; +import { + RequiredActionFunctionToolCall, + Run, + RunCreateAndPollParams, + RunCreateAndStreamParams, + RunCreateParams, + RunCreateParamsNonStreaming, + RunCreateParamsStreaming, + RunListParams, + RunStatus, + RunStreamParams, + RunSubmitToolOutputsAndPollParams, + RunSubmitToolOutputsParams, + RunSubmitToolOutputsParamsNonStreaming, + RunSubmitToolOutputsParamsStreaming, + RunSubmitToolOutputsStreamParams, + RunUpdateParams, + Runs, + RunsPage, +} from './runs/runs'; import { Stream } from '../../../streaming'; export class Threads extends APIResource { @@ -20,6 +73,11 @@ export class Threads extends APIResource { /** * Create a thread. + * + * @example + * ```ts + * const thread = await client.beta.threads.create(); + * ``` */ create(body?: ThreadCreateParams, options?: Core.RequestOptions): Core.APIPromise<Thread>; create(options?: Core.RequestOptions): Core.APIPromise<Thread>; @@ -39,6 +97,13 @@ /** * Retrieves a thread. + * + * @example + * ```ts + * const thread = await client.beta.threads.retrieve( + * 'thread_id', + * ); + * ``` */ retrieve(threadId: string, options?: Core.RequestOptions): Core.APIPromise<Thread> { return this._client.get(`/threads/${threadId}`, { @@ -49,6 +114,13 @@ /** * Modifies a thread. + * + * @example + * ```ts + * const thread = await client.beta.threads.update( + * 'thread_id', + * ); + * ``` */ update(threadId: string, body: ThreadUpdateParams, options?: Core.RequestOptions): Core.APIPromise<Thread> { return this._client.post(`/threads/${threadId}`, { @@ -60,6 +132,13 @@ /** * Delete a thread. + * + * @example + * ```ts + * const threadDeleted = await client.beta.threads.del( + * 'thread_id', + * ); + * ``` */ del(threadId: string, options?: Core.RequestOptions): Core.APIPromise<ThreadDeleted> { return this._client.delete(`/threads/${threadId}`, { @@ -70,6 +149,13 @@ /** * Create a thread and run it in one request. + * + * @example + * ```ts + * const run = await client.beta.threads.createAndRun({ + * assistant_id: 'assistant_id', + * }); + * ``` */ createAndRun( body: ThreadCreateAndRunParamsNonStreaming, @@ -121,8 +207,8 @@ /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -195,11 +281,13 @@ export interface Thread { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long.
+ * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The object type, which is always `thread`. @@ -267,11 +355,13 @@ export interface ThreadCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -306,11 +396,13 @@ export namespace ThreadCreateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace Message { @@ -380,9 +472,9 @@ export namespace ThreadCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -392,12 +484,53 @@ export namespace ThreadCreateParams { file_ids?: Array<string>; /** - * Set of 16 key-value pairs that can be attached to a vector store. This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown; + metadata?: Shared.Metadata | null; + } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks.
The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } } } } @@ -406,11 +539,13 @@ export namespace ThreadCreateParams { export interface ThreadUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -494,11 +629,13 @@ export interface ThreadCreateAndRunParamsBase { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to @@ -506,19 +643,19 @@ export interface ThreadCreateAndRunParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. */ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ parallel_tool_calls?: boolean; /** * Specifies the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. * * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured @@ -554,7 +691,8 @@ export interface ThreadCreateAndRunParamsBase { temperature?: number | null; /** - * If no thread is provided, an empty thread will be created. + * Options to create a new thread. If no thread is provided when running a request, + * an empty thread will be created. */ thread?: ThreadCreateAndRunParams.Thread; @@ -581,9 +719,7 @@ export interface ThreadCreateAndRunParamsBase { * Override the tools the assistant can use for this run. This is useful for * modifying the behavior on a per-run basis. 
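Taken together, the `ThreadCreateAndRunParamsBase` fields in this hunk compose as below. A hedged sketch with placeholder IDs; the static chunking values simply mirror the documented defaults:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const run = await client.beta.threads.createAndRun({
  assistant_id: 'assistant_id', // placeholder
  // Inline thread options: used because no existing thread ID is supplied.
  thread: {
    messages: [{ role: 'user', content: 'Summarize the attached file.' }],
    tool_resources: {
      file_search: {
        vector_stores: [
          {
            file_ids: ['file-abc123'], // placeholder
            // Static chunking instead of the default `auto` strategy.
            chunking_strategy: {
              type: 'static',
              static: { max_chunk_size_tokens: 800, chunk_overlap_tokens: 400 },
            },
          },
        ],
      },
    },
  },
  // Per-run override of the assistant's configured tools.
  tools: [{ type: 'file_search' }],
  metadata: { request_source: 'example' },
});
```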
*/ - tools?: Array< - AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool | AssistantsAPI.FunctionTool - > | null; + tools?: Array<AssistantsAPI.AssistantTool> | null; /** * An alternative to sampling with temperature, called nucleus sampling, where the @@ -603,7 +739,8 @@ export namespace ThreadCreateAndRunParams { /** - * If no thread is provided, an empty thread will be created. + * Options to create a new thread. If no thread is provided when running a request, + * an empty thread will be created. */ export interface Thread { /** @@ -614,11 +751,13 @@ /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * A set of resources that are made available to the assistant's tools in this @@ -653,11 +792,13 @@ /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; } export namespace Message { @@ -727,9 +868,9 @@ export namespace ThreadCreateAndRunParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -739,12 +880,53 @@ file_ids?: Array<string>; /** - * Set of 16 key-value pairs that can be attached to a vector store. This can be - * useful for storing additional information about the vector store in a structured - * format. Keys can be a maximum of 64 characters long and values can be a maxium - * of 512 characters long. + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown; + metadata?: Shared.Metadata | null; + } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`.
+ */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } } } } @@ -1489,69 +1671,84 @@ export namespace ThreadCreateAndRunStreamParams { } } -export namespace Threads { - export import AssistantResponseFormatOption = ThreadsAPI.AssistantResponseFormatOption; - export import AssistantToolChoice = ThreadsAPI.AssistantToolChoice; - export import AssistantToolChoiceFunction = ThreadsAPI.AssistantToolChoiceFunction; - export import AssistantToolChoiceOption = ThreadsAPI.AssistantToolChoiceOption; - export import Thread = ThreadsAPI.Thread; - export import ThreadDeleted = ThreadsAPI.ThreadDeleted; - export import ThreadCreateParams = ThreadsAPI.ThreadCreateParams; - export import ThreadUpdateParams = ThreadsAPI.ThreadUpdateParams; - export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams; - export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming; - export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming; - export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams; - export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams; - export import Runs = RunsAPI.Runs; - export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall; - export import Run = RunsAPI.Run; - export import RunStatus = RunsAPI.RunStatus; - export import RunsPage = RunsAPI.RunsPage; - export import RunCreateParams = RunsAPI.RunCreateParams; - export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming; - export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming; - export import RunUpdateParams = RunsAPI.RunUpdateParams; - export import RunListParams = RunsAPI.RunListParams; - export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams; - export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams; - export import RunStreamParams = RunsAPI.RunStreamParams; - export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams; - export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming; - export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming; - export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams; - export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams; - export import Messages = MessagesAPI.Messages; - export import Annotation = MessagesAPI.Annotation; - export import AnnotationDelta = MessagesAPI.AnnotationDelta; - export import FileCitationAnnotation = MessagesAPI.FileCitationAnnotation; - export import FileCitationDeltaAnnotation = MessagesAPI.FileCitationDeltaAnnotation; - export import FilePathAnnotation = MessagesAPI.FilePathAnnotation; - export import FilePathDeltaAnnotation = MessagesAPI.FilePathDeltaAnnotation; - export import ImageFile = MessagesAPI.ImageFile; - export import 
ImageFileContentBlock = MessagesAPI.ImageFileContentBlock; - export import ImageFileDelta = MessagesAPI.ImageFileDelta; - export import ImageFileDeltaBlock = MessagesAPI.ImageFileDeltaBlock; - export import ImageURL = MessagesAPI.ImageURL; - export import ImageURLContentBlock = MessagesAPI.ImageURLContentBlock; - export import ImageURLDelta = MessagesAPI.ImageURLDelta; - export import ImageURLDeltaBlock = MessagesAPI.ImageURLDeltaBlock; - export import Message = MessagesAPI.Message; - export import MessageContent = MessagesAPI.MessageContent; - export import MessageContentDelta = MessagesAPI.MessageContentDelta; - export import MessageContentPartParam = MessagesAPI.MessageContentPartParam; - export import MessageDeleted = MessagesAPI.MessageDeleted; - export import MessageDelta = MessagesAPI.MessageDelta; - export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent; - export import RefusalContentBlock = MessagesAPI.RefusalContentBlock; - export import RefusalDeltaBlock = MessagesAPI.RefusalDeltaBlock; - export import Text = MessagesAPI.Text; - export import TextContentBlock = MessagesAPI.TextContentBlock; - export import TextContentBlockParam = MessagesAPI.TextContentBlockParam; - export import TextDelta = MessagesAPI.TextDelta; - export import TextDeltaBlock = MessagesAPI.TextDeltaBlock; - export import MessagesPage = MessagesAPI.MessagesPage; - export import MessageCreateParams = MessagesAPI.MessageCreateParams; - export import MessageUpdateParams = MessagesAPI.MessageUpdateParams; - export import MessageListParams = MessagesAPI.MessageListParams; +Threads.Runs = Runs; +Threads.RunsPage = RunsPage; +Threads.Messages = Messages; +Threads.MessagesPage = MessagesPage; + +export declare namespace Threads { + export { + type AssistantResponseFormatOption as AssistantResponseFormatOption, + type AssistantToolChoice as AssistantToolChoice, + type AssistantToolChoiceFunction as AssistantToolChoiceFunction, + type AssistantToolChoiceOption as AssistantToolChoiceOption, + type Thread as Thread, + type ThreadDeleted as ThreadDeleted, + type ThreadCreateParams as ThreadCreateParams, + type ThreadUpdateParams as ThreadUpdateParams, + type ThreadCreateAndRunParams as ThreadCreateAndRunParams, + type ThreadCreateAndRunParamsNonStreaming as ThreadCreateAndRunParamsNonStreaming, + type ThreadCreateAndRunParamsStreaming as ThreadCreateAndRunParamsStreaming, + type ThreadCreateAndRunPollParams, + type ThreadCreateAndRunStreamParams, + }; + + export { + Runs as Runs, + type RequiredActionFunctionToolCall as RequiredActionFunctionToolCall, + type Run as Run, + type RunStatus as RunStatus, + RunsPage as RunsPage, + type RunCreateParams as RunCreateParams, + type RunCreateParamsNonStreaming as RunCreateParamsNonStreaming, + type RunCreateParamsStreaming as RunCreateParamsStreaming, + type RunUpdateParams as RunUpdateParams, + type RunListParams as RunListParams, + type RunCreateAndPollParams, + type RunCreateAndStreamParams, + type RunStreamParams, + type RunSubmitToolOutputsParams as RunSubmitToolOutputsParams, + type RunSubmitToolOutputsParamsNonStreaming as RunSubmitToolOutputsParamsNonStreaming, + type RunSubmitToolOutputsParamsStreaming as RunSubmitToolOutputsParamsStreaming, + type RunSubmitToolOutputsAndPollParams, + type RunSubmitToolOutputsStreamParams, + }; + + export { + Messages as Messages, + type Annotation as Annotation, + type AnnotationDelta as AnnotationDelta, + type FileCitationAnnotation as FileCitationAnnotation, + type FileCitationDeltaAnnotation as FileCitationDeltaAnnotation, + 
type FilePathAnnotation as FilePathAnnotation, + type FilePathDeltaAnnotation as FilePathDeltaAnnotation, + type ImageFile as ImageFile, + type ImageFileContentBlock as ImageFileContentBlock, + type ImageFileDelta as ImageFileDelta, + type ImageFileDeltaBlock as ImageFileDeltaBlock, + type ImageURL as ImageURL, + type ImageURLContentBlock as ImageURLContentBlock, + type ImageURLDelta as ImageURLDelta, + type ImageURLDeltaBlock as ImageURLDeltaBlock, + type MessagesAPIMessage as Message, + type MessageContent as MessageContent, + type MessageContentDelta as MessageContentDelta, + type MessageContentPartParam as MessageContentPartParam, + type MessageDeleted as MessageDeleted, + type MessageDelta as MessageDelta, + type MessageDeltaEvent as MessageDeltaEvent, + type RefusalContentBlock as RefusalContentBlock, + type RefusalDeltaBlock as RefusalDeltaBlock, + type Text as Text, + type TextContentBlock as TextContentBlock, + type TextContentBlockParam as TextContentBlockParam, + type TextDelta as TextDelta, + type TextDeltaBlock as TextDeltaBlock, + MessagesPage as MessagesPage, + type MessageCreateParams as MessageCreateParams, + type MessageUpdateParams as MessageUpdateParams, + type MessageListParams as MessageListParams, + }; + + export { AssistantStream }; } diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/beta/vector-stores/index.ts deleted file mode 100644 index f70215f8f..000000000 --- a/src/resources/beta/vector-stores/index.ts +++ /dev/null @@ -1,32 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -export { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyParam, - VectorStore, - VectorStoreDeleted, - VectorStoreCreateParams, - VectorStoreUpdateParams, - VectorStoreListParams, - VectorStoresPage, - VectorStores, -} from './vector-stores'; -export { - VectorStoreFile, - VectorStoreFileDeleted, - FileCreateParams, - FileListParams, - VectorStoreFilesPage, - Files, -} from './files'; -export { - VectorStoreFileBatch, - FileBatchCreateParams, - FileBatchListFilesParams, - FileBatches, -} from './file-batches'; diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 5bc7de955..9dbc636d8 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,77 +1,109 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
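The chat.ts rewrite below applies the same `export declare namespace` pattern. A hedged sketch of what the pattern preserves for consumers, with type paths assumed from the namespaces in this diff:

```ts
import OpenAI from 'openai';

// Types remain addressable through the declared namespaces, so callers do not
// need deep imports into generated resource files.
type RunStatus = OpenAI.Beta.Threads.RunStatus;
type ChatParams = OpenAI.Chat.ChatCompletionCreateParams;

const params: ChatParams = {
  model: 'gpt-4o-mini', // placeholder model
  messages: [{ role: 'user', content: 'hello' }],
};
const terminal: RunStatus[] = ['completed', 'failed', 'cancelled', 'expired'];
```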
import { APIResource } from '../../resource'; -import * as ChatAPI from './chat'; -import * as CompletionsAPI from './completions'; +import * as Shared from '../shared'; +import * as CompletionsAPI from './completions/completions'; +import { + ChatCompletion, + ChatCompletionAssistantMessageParam, + ChatCompletionAudio, + ChatCompletionAudioParam, + ChatCompletionChunk, + ChatCompletionContentPart, + ChatCompletionContentPartImage, + ChatCompletionContentPartInputAudio, + ChatCompletionContentPartRefusal, + ChatCompletionContentPartText, + ChatCompletionCreateParams, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionCreateParamsStreaming, + ChatCompletionDeleted, + ChatCompletionDeveloperMessageParam, + ChatCompletionFunctionCallOption, + ChatCompletionFunctionMessageParam, + ChatCompletionListParams, + ChatCompletionMessage, + ChatCompletionMessageParam, + ChatCompletionMessageToolCall, + ChatCompletionModality, + ChatCompletionNamedToolChoice, + ChatCompletionPredictionContent, + ChatCompletionReasoningEffort, + ChatCompletionRole, + ChatCompletionStoreMessage, + ChatCompletionStreamOptions, + ChatCompletionSystemMessageParam, + ChatCompletionTokenLogprob, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + ChatCompletionToolMessageParam, + ChatCompletionUpdateParams, + ChatCompletionUserMessageParam, + ChatCompletionsPage, + CompletionCreateParams, + CompletionCreateParamsNonStreaming, + CompletionCreateParamsStreaming, + CompletionListParams, + CompletionUpdateParams, + Completions, + CreateChatCompletionRequestMessage, +} from './completions/completions'; export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); } -export type ChatModel = - | 'o1-preview' - | 'o1-preview-2024-09-12' - | 'o1-mini' - | 'o1-mini-2024-09-12' - | 'gpt-4o' - | 'gpt-4o-2024-08-06' - | 'gpt-4o-2024-05-13' - | 'gpt-4o-realtime-preview-2024-10-01' - | 'chatgpt-4o-latest' - | 'gpt-4o-mini' - | 'gpt-4o-mini-2024-07-18' - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0301' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613'; +export type ChatModel = Shared.ChatModel; -export namespace Chat { - export import ChatModel = ChatAPI.ChatModel; - export import Completions = CompletionsAPI.Completions; - export import ChatCompletion = CompletionsAPI.ChatCompletion; - export import ChatCompletionAssistantMessageParam = CompletionsAPI.ChatCompletionAssistantMessageParam; - export import ChatCompletionChunk = CompletionsAPI.ChatCompletionChunk; - export import ChatCompletionContentPart = CompletionsAPI.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = CompletionsAPI.ChatCompletionContentPartImage; - export import ChatCompletionContentPartRefusal = CompletionsAPI.ChatCompletionContentPartRefusal; - export import ChatCompletionContentPartText = CompletionsAPI.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = CompletionsAPI.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = CompletionsAPI.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = CompletionsAPI.ChatCompletionMessage; - export import 
ChatCompletionMessageParam = CompletionsAPI.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = CompletionsAPI.ChatCompletionMessageToolCall; - export import ChatCompletionNamedToolChoice = CompletionsAPI.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = CompletionsAPI.ChatCompletionRole; - export import ChatCompletionStreamOptions = CompletionsAPI.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = CompletionsAPI.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = CompletionsAPI.ChatCompletionTokenLogprob; - export import ChatCompletionTool = CompletionsAPI.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = CompletionsAPI.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = CompletionsAPI.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = CompletionsAPI.ChatCompletionUserMessageParam; - /** - * @deprecated ChatCompletionMessageParam should be used instead - */ - export import CreateChatCompletionRequestMessage = CompletionsAPI.CreateChatCompletionRequestMessage; - export import ChatCompletionCreateParams = CompletionsAPI.ChatCompletionCreateParams; - export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = CompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export import CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = CompletionsAPI.ChatCompletionCreateParamsStreaming; - export import CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming; +Chat.Completions = Completions; +Chat.ChatCompletionsPage = ChatCompletionsPage; + +export declare namespace Chat { + export { type ChatModel as ChatModel }; + + export { + Completions as Completions, + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, + type ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type 
ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, + ChatCompletionsPage as ChatCompletionsPage, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type CompletionCreateParams as CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type CompletionUpdateParams as CompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, + type CompletionListParams as CompletionListParams, + }; } diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts index 27aebdc4c..55b151e8b 100644 --- a/src/resources/chat/completions.ts +++ b/src/resources/chat/completions.ts @@ -1,1081 +1 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import { APIResource } from '../../resource'; -import { APIPromise } from '../../core'; -import * as Core from '../../core'; -import * as ChatCompletionsAPI from './completions'; -import * as CompletionsAPI from '../completions'; -import * as Shared from '../shared'; -import * as ChatAPI from './chat'; -import { Stream } from '../../streaming'; - -export class Completions extends APIResource { - /** - * Creates a model response for the given chat conversation. - */ - create( - body: ChatCompletionCreateParamsNonStreaming, - options?: Core.RequestOptions, - ): APIPromise<ChatCompletion>; - create( - body: ChatCompletionCreateParamsStreaming, - options?: Core.RequestOptions, - ): APIPromise<Stream<ChatCompletionChunk>>; - create( - body: ChatCompletionCreateParamsBase, - options?: Core.RequestOptions, - ): APIPromise<Stream<ChatCompletionChunk> | ChatCompletion>; - create( - body: ChatCompletionCreateParams, - options?: Core.RequestOptions, - ): APIPromise<ChatCompletion> | APIPromise<Stream<ChatCompletionChunk>> { - return this._client.post('/chat/completions', { body, ...options, stream: body.stream ?? false }) as - | APIPromise<ChatCompletion> - | APIPromise<Stream<ChatCompletionChunk>>; - } -} - -/** - * Represents a chat completion response returned by model, based on the provided - * input. - */ -export interface ChatCompletion { - /** - * A unique identifier for the chat completion. - */ - id: string; - - /** - * A list of chat completion choices. Can be more than one if `n` is greater - * than 1. - */ - choices: Array<ChatCompletion.Choice>; - - /** - * The Unix timestamp (in seconds) of when the chat completion was created. - */ - created: number; - - /** - * The model used for the chat completion. - */ - model: string; - - /** - * The object type, which is always `chat.completion`. - */ - object: 'chat.completion'; - - /** - * The service tier used for processing the request. This field is only included if - * the `service_tier` parameter is specified in the request. - */ - service_tier?: 'scale' | 'default' | null; - - /** - * This fingerprint represents the backend configuration that the model runs with.
- * - * Can be used in conjunction with the `seed` request parameter to understand when - * backend changes have been made that might impact determinism. - */ - system_fingerprint?: string; - - /** - * Usage statistics for the completion request. - */ - usage?: CompletionsAPI.CompletionUsage; -} - -export namespace ChatCompletion { - export interface Choice { - /** - * The reason the model stopped generating tokens. This will be `stop` if the model - * hit a natural stop point or a provided stop sequence, `length` if the maximum - * number of tokens specified in the request was reached, `content_filter` if - * content was omitted due to a flag from our content filters, `tool_calls` if the - * model called a tool, or `function_call` (deprecated) if the model called a - * function. - */ - finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call'; - - /** - * The index of the choice in the list of choices. - */ - index: number; - - /** - * Log probability information for the choice. - */ - logprobs: Choice.Logprobs | null; - - /** - * A chat completion message generated by the model. - */ - message: ChatCompletionsAPI.ChatCompletionMessage; - } - - export namespace Choice { - /** - * Log probability information for the choice. - */ - export interface Logprobs { - /** - * A list of message content tokens with log probability information. - */ - content: Array<ChatCompletionTokenLogprob> | null; - - /** - * A list of message refusal tokens with log probability information. - */ - refusal: Array<ChatCompletionTokenLogprob> | null; - } - } -} - -export interface ChatCompletionAssistantMessageParam { - /** - * The role of the messages author, in this case `assistant`. - */ - role: 'assistant'; - - /** - * The contents of the assistant message. Required unless `tool_calls` or - * `function_call` is specified. - */ - content?: string | Array<ChatCompletionContentPartText | ChatCompletionContentPartRefusal> | null; - - /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. - */ - function_call?: ChatCompletionAssistantMessageParam.FunctionCall | null; - - /** - * An optional name for the participant. Provides the model information to - * differentiate between participants of the same role. - */ - name?: string; - - /** - * The refusal message by the assistant. - */ - refusal?: string | null; - - /** - * The tool calls generated by the model, such as function calls. - */ - tool_calls?: Array<ChatCompletionMessageToolCall>; -} - -export namespace ChatCompletionAssistantMessageParam { - /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. - */ - export interface FunctionCall { - /** - * The arguments to call the function with, as generated by the model in JSON - * format. Note that the model does not always generate valid JSON, and may - * hallucinate parameters not defined by your function schema. Validate the - * arguments in your code before calling your function. - */ - arguments: string; - - /** - * The name of the function to call. - */ - name: string; - } -} - -/** - * Represents a streamed chunk of a chat completion response returned by model, - * based on the provided input. - */ -export interface ChatCompletionChunk { - /** - * A unique identifier for the chat completion. Each chunk has the same ID. - */ - id: string; - - /** - * A list of chat completion choices. Can contain more than one elements if `n` is - * greater than 1. Can also be empty for the last chunk if you set - * `stream_options: {"include_usage": true}`.
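The `usage` contract described in this (deleted) docstring is easiest to see in code. A hedged sketch: with `stream_options: { include_usage: true }`, each chunk's `usage` is null except on the final chunk (the model name is a placeholder):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.chat.completions.create({
  model: 'gpt-4o-mini', // placeholder
  messages: [{ role: 'user', content: 'Say hello' }],
  stream: true,
  stream_options: { include_usage: true },
});

for await (const chunk of stream) {
  // The final chunk carries an empty `choices` array and the populated usage.
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  if (chunk.usage) {
    console.log('\ntotal tokens:', chunk.usage.total_tokens);
  }
}
```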
- */ - choices: Array<ChatCompletionChunk.Choice>; - - /** - * The Unix timestamp (in seconds) of when the chat completion was created. Each - * chunk has the same timestamp. - */ - created: number; - - /** - * The model to generate the completion. - */ - model: string; - - /** - * The object type, which is always `chat.completion.chunk`. - */ - object: 'chat.completion.chunk'; - - /** - * The service tier used for processing the request. This field is only included if - * the `service_tier` parameter is specified in the request. - */ - service_tier?: 'scale' | 'default' | null; - - /** - * This fingerprint represents the backend configuration that the model runs with. - * Can be used in conjunction with the `seed` request parameter to understand when - * backend changes have been made that might impact determinism. - */ - system_fingerprint?: string; - - /** - * An optional field that will only be present when you set - * `stream_options: {"include_usage": true}` in your request. When present, it - * contains a null value except for the last chunk which contains the token usage - * statistics for the entire request. - */ - usage?: CompletionsAPI.CompletionUsage; -} - -export namespace ChatCompletionChunk { - export interface Choice { - /** - * A chat completion delta generated by streamed model responses. - */ - delta: Choice.Delta; - - /** - * The reason the model stopped generating tokens. This will be `stop` if the model - * hit a natural stop point or a provided stop sequence, `length` if the maximum - * number of tokens specified in the request was reached, `content_filter` if - * content was omitted due to a flag from our content filters, `tool_calls` if the - * model called a tool, or `function_call` (deprecated) if the model called a - * function. - */ - finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null; - - /** - * The index of the choice in the list of choices. - */ - index: number; - - /** - * Log probability information for the choice. - */ - logprobs?: Choice.Logprobs | null; - } - - export namespace Choice { - /** - * A chat completion delta generated by streamed model responses. - */ - export interface Delta { - /** - * The contents of the chunk message. - */ - content?: string | null; - - /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. - */ - function_call?: Delta.FunctionCall; - - /** - * The refusal message generated by the model. - */ - refusal?: string | null; - - /** - * The role of the author of this message. - */ - role?: 'system' | 'user' | 'assistant' | 'tool'; - - tool_calls?: Array<Delta.ToolCall>; - } - - export namespace Delta { - /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. - */ - export interface FunctionCall { - /** - * The arguments to call the function with, as generated by the model in JSON - * format. Note that the model does not always generate valid JSON, and may - * hallucinate parameters not defined by your function schema. Validate the - * arguments in your code before calling your function. - */ - arguments?: string; - - /** - * The name of the function to call. - */ - name?: string; - } - - export interface ToolCall { - index: number; - - /** - * The ID of the tool call. - */ - id?: string; - - function?: ToolCall.Function; - - /** - * The type of the tool. Currently, only `function` is supported.
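Because `ToolCall` deltas arrive fragment by fragment keyed on `index`, callers must concatenate the `arguments` strings themselves. A hedged sketch, assuming the `Stream` type exported from 'openai/streaming':

```ts
import type OpenAI from 'openai';
import type { Stream } from 'openai/streaming';

async function collectToolCalls(stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>) {
  const calls: Array<{ id?: string; name?: string; args: string }> = [];
  for await (const chunk of stream) {
    for (const tc of chunk.choices[0]?.delta?.tool_calls ?? []) {
      // `index` identifies which in-progress tool call a fragment belongs to.
      const slot = (calls[tc.index] ??= { args: '' });
      if (tc.id) slot.id = tc.id;
      if (tc.function?.name) slot.name = tc.function.name;
      if (tc.function?.arguments) slot.args += tc.function.arguments;
    }
  }
  // Each `args` is a complete JSON string once the stream has ended.
  return calls;
}
```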
- */ - type?: 'function'; - } - - export namespace ToolCall { - export interface Function { - /** - * The arguments to call the function with, as generated by the model in JSON - * format. Note that the model does not always generate valid JSON, and may - * hallucinate parameters not defined by your function schema. Validate the - * arguments in your code before calling your function. - */ - arguments?: string; - - /** - * The name of the function to call. - */ - name?: string; - } - } - } - - /** - * Log probability information for the choice. - */ - export interface Logprobs { - /** - * A list of message content tokens with log probability information. - */ - content: Array | null; - - /** - * A list of message refusal tokens with log probability information. - */ - refusal: Array | null; - } - } -} - -export type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage; - -export interface ChatCompletionContentPartImage { - image_url: ChatCompletionContentPartImage.ImageURL; - - /** - * The type of the content part. - */ - type: 'image_url'; -} - -export namespace ChatCompletionContentPartImage { - export interface ImageURL { - /** - * Either a URL of the image or the base64 encoded image data. - */ - url: string; - - /** - * Specifies the detail level of the image. Learn more in the - * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). - */ - detail?: 'auto' | 'low' | 'high'; - } -} - -export interface ChatCompletionContentPartRefusal { - /** - * The refusal message generated by the model. - */ - refusal: string; - - /** - * The type of the content part. - */ - type: 'refusal'; -} - -export interface ChatCompletionContentPartText { - /** - * The text content. - */ - text: string; - - /** - * The type of the content part. - */ - type: 'text'; -} - -/** - * Specifying a particular function via `{"name": "my_function"}` forces the model - * to call that function. - */ -export interface ChatCompletionFunctionCallOption { - /** - * The name of the function to call. - */ - name: string; -} - -/** - * @deprecated - */ -export interface ChatCompletionFunctionMessageParam { - /** - * The contents of the function message. - */ - content: string | null; - - /** - * The name of the function to call. - */ - name: string; - - /** - * The role of the messages author, in this case `function`. - */ - role: 'function'; -} - -/** - * A chat completion message generated by the model. - */ -export interface ChatCompletionMessage { - /** - * The contents of the message. - */ - content: string | null; - - /** - * The refusal message generated by the model. - */ - refusal: string | null; - - /** - * The role of the author of this message. - */ - role: 'assistant'; - - /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. - */ - function_call?: ChatCompletionMessage.FunctionCall | null; - - /** - * The tool calls generated by the model, such as function calls. - */ - tool_calls?: Array; -} - -export namespace ChatCompletionMessage { - /** - * @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of - * a function that should be called, as generated by the model. - */ - export interface FunctionCall { - /** - * The arguments to call the function with, as generated by the model in JSON - * format. 
Note that the model does not always generate valid JSON, and may - * hallucinate parameters not defined by your function schema. Validate the - * arguments in your code before calling your function. - */ - arguments: string; - - /** - * The name of the function to call. - */ - name: string; - } -} - -export type ChatCompletionMessageParam = - | ChatCompletionSystemMessageParam - | ChatCompletionUserMessageParam - | ChatCompletionAssistantMessageParam - | ChatCompletionToolMessageParam - | ChatCompletionFunctionMessageParam; - -export interface ChatCompletionMessageToolCall { - /** - * The ID of the tool call. - */ - id: string; - - /** - * The function that the model called. - */ - function: ChatCompletionMessageToolCall.Function; - - /** - * The type of the tool. Currently, only `function` is supported. - */ - type: 'function'; -} - -export namespace ChatCompletionMessageToolCall { - /** - * The function that the model called. - */ - export interface Function { - /** - * The arguments to call the function with, as generated by the model in JSON - * format. Note that the model does not always generate valid JSON, and may - * hallucinate parameters not defined by your function schema. Validate the - * arguments in your code before calling your function. - */ - arguments: string; - - /** - * The name of the function to call. - */ - name: string; - } -} - -/** - * Specifies a tool the model should use. Use to force the model to call a specific - * function. - */ -export interface ChatCompletionNamedToolChoice { - function: ChatCompletionNamedToolChoice.Function; - - /** - * The type of the tool. Currently, only `function` is supported. - */ - type: 'function'; -} - -export namespace ChatCompletionNamedToolChoice { - export interface Function { - /** - * The name of the function to call. - */ - name: string; - } -} - -/** - * The role of the author of a message - */ -export type ChatCompletionRole = 'system' | 'user' | 'assistant' | 'tool' | 'function'; - -/** - * Options for streaming response. Only set this when you set `stream: true`. - */ -export interface ChatCompletionStreamOptions { - /** - * If set, an additional chunk will be streamed before the `data: [DONE]` message. - * The `usage` field on this chunk shows the token usage statistics for the entire - * request, and the `choices` field will always be an empty array. All other chunks - * will also include a `usage` field, but with a null value. - */ - include_usage?: boolean; -} - -export interface ChatCompletionSystemMessageParam { - /** - * The contents of the system message. - */ - content: string | Array; - - /** - * The role of the messages author, in this case `system`. - */ - role: 'system'; - - /** - * An optional name for the participant. Provides the model information to - * differentiate between participants of the same role. - */ - name?: string; -} - -export interface ChatCompletionTokenLogprob { - /** - * The token. - */ - token: string; - - /** - * A list of integers representing the UTF-8 bytes representation of the token. - * Useful in instances where characters are represented by multiple tokens and - * their byte representations must be combined to generate the correct text - * representation. Can be `null` if there is no bytes representation for the token. - */ - bytes: Array | null; - - /** - * The log probability of this token, if it is within the top 20 most likely - * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very - * unlikely. 
- */ - logprob: number; - - /** - * List of the most likely tokens and their log probability, at this token - * position. In rare cases, there may be fewer than the number of requested - * `top_logprobs` returned. - */ - top_logprobs: Array; -} - -export namespace ChatCompletionTokenLogprob { - export interface TopLogprob { - /** - * The token. - */ - token: string; - - /** - * A list of integers representing the UTF-8 bytes representation of the token. - * Useful in instances where characters are represented by multiple tokens and - * their byte representations must be combined to generate the correct text - * representation. Can be `null` if there is no bytes representation for the token. - */ - bytes: Array | null; - - /** - * The log probability of this token, if it is within the top 20 most likely - * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very - * unlikely. - */ - logprob: number; - } -} - -export interface ChatCompletionTool { - function: Shared.FunctionDefinition; - - /** - * The type of the tool. Currently, only `function` is supported. - */ - type: 'function'; -} - -/** - * Controls which (if any) tool is called by the model. `none` means the model will - * not call any tool and instead generates a message. `auto` means the model can - * pick between generating a message or calling one or more tools. `required` means - * the model must call one or more tools. Specifying a particular tool via - * `{"type": "function", "function": {"name": "my_function"}}` forces the model to - * call that tool. - * - * `none` is the default when no tools are present. `auto` is the default if tools - * are present. - */ -export type ChatCompletionToolChoiceOption = 'none' | 'auto' | 'required' | ChatCompletionNamedToolChoice; - -export interface ChatCompletionToolMessageParam { - /** - * The contents of the tool message. - */ - content: string | Array; - - /** - * The role of the messages author, in this case `tool`. - */ - role: 'tool'; - - /** - * Tool call that this message is responding to. - */ - tool_call_id: string; -} - -export interface ChatCompletionUserMessageParam { - /** - * The contents of the user message. - */ - content: string | Array; - - /** - * The role of the messages author, in this case `user`. - */ - role: 'user'; - - /** - * An optional name for the participant. Provides the model information to - * differentiate between participants of the same role. - */ - name?: string; -} - -/** - * @deprecated ChatCompletionMessageParam should be used instead - */ -export type CreateChatCompletionRequestMessage = ChatCompletionMessageParam; - -export type ChatCompletionCreateParams = - | ChatCompletionCreateParamsNonStreaming - | ChatCompletionCreateParamsStreaming; - -export interface ChatCompletionCreateParamsBase { - /** - * A list of messages comprising the conversation so far. Depending on the - * [model](https://platform.openai.com/docs/models) you use, different message - * types (modalities) are supported, like - * [text](https://platform.openai.com/docs/guides/text-generation), - * [images](https://platform.openai.com/docs/guides/vision), and - * [audio](https://platform.openai.com/docs/guides/audio). - */ - messages: Array; - - /** - * ID of the model to use. See the - * [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) - * table for details on which models work with the Chat API. - */ - model: (string & {}) | ChatAPI.ChatModel; - - /** - * Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their - * existing frequency in the text so far, decreasing the model's likelihood to - * repeat the same line verbatim. - * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - */ - frequency_penalty?: number | null; - - /** - * Deprecated in favor of `tool_choice`. - * - * Controls which (if any) function is called by the model. `none` means the model - * will not call a function and instead generates a message. `auto` means the model - * can pick between generating a message or calling a function. Specifying a - * particular function via `{"name": "my_function"}` forces the model to call that - * function. - * - * `none` is the default when no functions are present. `auto` is the default if - * functions are present. - */ - function_call?: 'none' | 'auto' | ChatCompletionFunctionCallOption; - - /** - * Deprecated in favor of `tools`. - * - * A list of functions the model may generate JSON inputs for. - */ - functions?: Array; - - /** - * Modify the likelihood of specified tokens appearing in the completion. - * - * Accepts a JSON object that maps tokens (specified by their token ID in the - * tokenizer) to an associated bias value from -100 to 100. Mathematically, the - * bias is added to the logits generated by the model prior to sampling. The exact - * effect will vary per model, but values between -1 and 1 should decrease or - * increase likelihood of selection; values like -100 or 100 should result in a ban - * or exclusive selection of the relevant token. - */ - logit_bias?: Record | null; - - /** - * Whether to return log probabilities of the output tokens or not. If true, - * returns the log probabilities of each output token returned in the `content` of - * `message`. - */ - logprobs?: boolean | null; - - /** - * An upper bound for the number of tokens that can be generated for a completion, - * including visible output tokens and - * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). - */ - max_completion_tokens?: number | null; - - /** - * The maximum number of [tokens](/tokenizer) that can be generated in the chat - * completion. This value can be used to control - * [costs](https://openai.com/api/pricing/) for text generated via API. - * - * This value is now deprecated in favor of `max_completion_tokens`, and is not - * compatible with - * [o1 series models](https://platform.openai.com/docs/guides/reasoning). - */ - max_tokens?: number | null; - - /** - * Developer-defined tags and values used for filtering completions in the - * [dashboard](https://platform.openai.com/completions). - */ - metadata?: Record | null; - - /** - * How many chat completion choices to generate for each input message. Note that - * you will be charged based on the number of generated tokens across all of the - * choices. Keep `n` as `1` to minimize costs. - */ - n?: number | null; - - /** - * Whether to enable - * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) - * during tool use. - */ - parallel_tool_calls?: boolean; - - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on - * whether they appear in the text so far, increasing the model's likelihood to - * talk about new topics. 
- * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) - */ - presence_penalty?: number | null; - - /** - * An object specifying the format that the model must output. Compatible with - * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), - * [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), - * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and - * all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - * - * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - * Outputs which ensures the model will match your supplied JSON schema. Learn more - * in the - * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - * - * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - * message the model generates is valid JSON. - * - * **Important:** when using JSON mode, you **must** also instruct the model to - * produce JSON yourself via a system or user message. Without this, the model may - * generate an unending stream of whitespace until the generation reaches the token - * limit, resulting in a long-running and seemingly "stuck" request. Also note that - * the message content may be partially cut off if `finish_reason="length"`, which - * indicates the generation exceeded `max_tokens` or the conversation exceeded the - * max context length. - */ - response_format?: - | Shared.ResponseFormatText - | Shared.ResponseFormatJSONObject - | Shared.ResponseFormatJSONSchema; - - /** - * This feature is in Beta. If specified, our system will make a best effort to - * sample deterministically, such that repeated requests with the same `seed` and - * parameters should return the same result. Determinism is not guaranteed, and you - * should refer to the `system_fingerprint` response parameter to monitor changes - * in the backend. - */ - seed?: number | null; - - /** - * Specifies the latency tier to use for processing the request. This parameter is - * relevant for customers subscribed to the scale tier service: - * - * - If set to 'auto', and the Project is Scale tier enabled, the system will - * utilize scale tier credits until they are exhausted. - * - If set to 'auto', and the Project is not Scale tier enabled, the request will - * be processed using the default service tier with a lower uptime SLA and no - * latency guarentee. - * - If set to 'default', the request will be processed using the default service - * tier with a lower uptime SLA and no latency guarentee. - * - When not set, the default behavior is 'auto'. - * - * When this parameter is set, the response body will include the `service_tier` - * utilized. - */ - service_tier?: 'auto' | 'default' | null; - - /** - * Up to 4 sequences where the API will stop generating further tokens. - */ - stop?: string | null | Array; - - /** - * Whether or not to store the output of this completion request for traffic - * logging in the [dashboard](https://platform.openai.com/completions). - */ - store?: boolean | null; - - /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. 
- * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - */ - stream?: boolean | null; - - /** - * Options for streaming response. Only set this when you set `stream: true`. - */ - stream_options?: ChatCompletionStreamOptions | null; - - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - * make the output more random, while lower values like 0.2 will make it more - * focused and deterministic. - * - * We generally recommend altering this or `top_p` but not both. - */ - temperature?: number | null; - - /** - * Controls which (if any) tool is called by the model. `none` means the model will - * not call any tool and instead generates a message. `auto` means the model can - * pick between generating a message or calling one or more tools. `required` means - * the model must call one or more tools. Specifying a particular tool via - * `{"type": "function", "function": {"name": "my_function"}}` forces the model to - * call that tool. - * - * `none` is the default when no tools are present. `auto` is the default if tools - * are present. - */ - tool_choice?: ChatCompletionToolChoiceOption; - - /** - * A list of tools the model may call. Currently, only functions are supported as a - * tool. Use this to provide a list of functions the model may generate JSON inputs - * for. A max of 128 functions are supported. - */ - tools?: Array; - - /** - * An integer between 0 and 20 specifying the number of most likely tokens to - * return at each token position, each with an associated log probability. - * `logprobs` must be set to `true` if this parameter is used. - */ - top_logprobs?: number | null; - - /** - * An alternative to sampling with temperature, called nucleus sampling, where the - * model considers the results of the tokens with top_p probability mass. So 0.1 - * means only the tokens comprising the top 10% probability mass are considered. - * - * We generally recommend altering this or `temperature` but not both. - */ - top_p?: number | null; - - /** - * A unique identifier representing your end-user, which can help OpenAI to monitor - * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). - */ - user?: string; -} - -export namespace ChatCompletionCreateParams { - /** - * @deprecated - */ - export interface Function { - /** - * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain - * underscores and dashes, with a maximum length of 64. - */ - name: string; - - /** - * A description of what the function does, used by the model to choose when and - * how to call the function. - */ - description?: string; - - /** - * The parameters the functions accepts, described as a JSON Schema object. See the - * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, - * and the - * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - * documentation about the format. - * - * Omitting `parameters` defines a function with an empty parameter list. 
- */ - parameters?: Shared.FunctionParameters; - } - - export type ChatCompletionCreateParamsNonStreaming = - ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export type ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming; -} - -/** - * @deprecated Use ChatCompletionCreateParams instead - */ -export type CompletionCreateParams = ChatCompletionCreateParams; - -export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase { - /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - */ - stream?: false | null; -} - -/** - * @deprecated Use ChatCompletionCreateParamsNonStreaming instead - */ -export type CompletionCreateParamsNonStreaming = ChatCompletionCreateParamsNonStreaming; - -export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase { - /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). - */ - stream: true; -} - -/** - * @deprecated Use ChatCompletionCreateParamsStreaming instead - */ -export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreaming; - -export namespace Completions { - export import ChatCompletion = ChatCompletionsAPI.ChatCompletion; - export import ChatCompletionAssistantMessageParam = ChatCompletionsAPI.ChatCompletionAssistantMessageParam; - export import ChatCompletionChunk = ChatCompletionsAPI.ChatCompletionChunk; - export import ChatCompletionContentPart = ChatCompletionsAPI.ChatCompletionContentPart; - export import ChatCompletionContentPartImage = ChatCompletionsAPI.ChatCompletionContentPartImage; - export import ChatCompletionContentPartRefusal = ChatCompletionsAPI.ChatCompletionContentPartRefusal; - export import ChatCompletionContentPartText = ChatCompletionsAPI.ChatCompletionContentPartText; - export import ChatCompletionFunctionCallOption = ChatCompletionsAPI.ChatCompletionFunctionCallOption; - export import ChatCompletionFunctionMessageParam = ChatCompletionsAPI.ChatCompletionFunctionMessageParam; - export import ChatCompletionMessage = ChatCompletionsAPI.ChatCompletionMessage; - export import ChatCompletionMessageParam = ChatCompletionsAPI.ChatCompletionMessageParam; - export import ChatCompletionMessageToolCall = ChatCompletionsAPI.ChatCompletionMessageToolCall; - export import ChatCompletionNamedToolChoice = ChatCompletionsAPI.ChatCompletionNamedToolChoice; - export import ChatCompletionRole = ChatCompletionsAPI.ChatCompletionRole; - export import ChatCompletionStreamOptions = ChatCompletionsAPI.ChatCompletionStreamOptions; - export import ChatCompletionSystemMessageParam = ChatCompletionsAPI.ChatCompletionSystemMessageParam; - export import ChatCompletionTokenLogprob = ChatCompletionsAPI.ChatCompletionTokenLogprob; - export import ChatCompletionTool = 
ChatCompletionsAPI.ChatCompletionTool; - export import ChatCompletionToolChoiceOption = ChatCompletionsAPI.ChatCompletionToolChoiceOption; - export import ChatCompletionToolMessageParam = ChatCompletionsAPI.ChatCompletionToolMessageParam; - export import ChatCompletionUserMessageParam = ChatCompletionsAPI.ChatCompletionUserMessageParam; - /** - * @deprecated ChatCompletionMessageParam should be used instead - */ - export import CreateChatCompletionRequestMessage = ChatCompletionsAPI.CreateChatCompletionRequestMessage; - export import ChatCompletionCreateParams = ChatCompletionsAPI.ChatCompletionCreateParams; - export import CompletionCreateParams = ChatCompletionsAPI.CompletionCreateParams; - export import ChatCompletionCreateParamsNonStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming; - export import CompletionCreateParamsNonStreaming = ChatCompletionsAPI.CompletionCreateParamsNonStreaming; - export import ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming; - export import CompletionCreateParamsStreaming = ChatCompletionsAPI.CompletionCreateParamsStreaming; -} +export * from './completions/completions'; diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts new file mode 100644 index 000000000..6481f8e0f --- /dev/null +++ b/src/resources/chat/completions/completions.ts @@ -0,0 +1,1704 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import { APIPromise } from '../../../core'; +import * as Core from '../../../core'; +import * as CompletionsCompletionsAPI from './completions'; +import * as CompletionsAPI from '../../completions'; +import * as Shared from '../../shared'; +import * as MessagesAPI from './messages'; +import { MessageListParams, Messages } from './messages'; +import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { Stream } from '../../../streaming'; + +export class Completions extends APIResource { + messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client); + + /** + * **Starting a new project?** We recommend trying + * [Responses](https://platform.openai.com/docs/api-reference/responses) to take + * advantage of the latest OpenAI platform features. Compare + * [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + * + * --- + * + * Creates a model response for the given chat conversation. Learn more in the + * [text generation](https://platform.openai.com/docs/guides/text-generation), + * [vision](https://platform.openai.com/docs/guides/vision), and + * [audio](https://platform.openai.com/docs/guides/audio) guides. + * + * Parameter support can differ depending on the model used to generate the + * response, particularly for newer reasoning models. Parameters that are only + * supported for reasoning models are noted below. For the current state of + * unsupported parameters in reasoning models, + * [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). 
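+ *
+ * As a rough sketch (assuming `client` is an initialized `OpenAI` instance), the
+ * non-streaming overload resolves to a `ChatCompletion`, while passing
+ * `stream: true` selects the overload that resolves to a
+ * `Stream<ChatCompletionChunk>`:
+ *
+ * ```ts
+ * // Resolves to a ChatCompletion
+ * const completion = await client.chat.completions.create({
+ *   model: 'gpt-4o',
+ *   messages: [{ role: 'user', content: 'Hello' }],
+ * });
+ *
+ * // Resolves to a Stream<ChatCompletionChunk>
+ * const stream = await client.chat.completions.create({
+ *   model: 'gpt-4o',
+ *   messages: [{ role: 'user', content: 'Hello' }],
+ *   stream: true,
+ * });
+ * ```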
+ *
+ * @example
+ * ```ts
+ * const chatCompletion = await client.chat.completions.create(
+ *   {
+ *     messages: [{ content: 'string', role: 'developer' }],
+ *     model: 'gpt-4o',
+ *   },
+ * );
+ * ```
+ */
+ create(
+ body: ChatCompletionCreateParamsNonStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise<ChatCompletion>;
+ create(
+ body: ChatCompletionCreateParamsStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise<Stream<ChatCompletionChunk>>;
+ create(
+ body: ChatCompletionCreateParamsBase,
+ options?: Core.RequestOptions,
+ ): APIPromise<Stream<ChatCompletionChunk> | ChatCompletion>;
+ create(
+ body: ChatCompletionCreateParams,
+ options?: Core.RequestOptions,
+ ): APIPromise<ChatCompletion> | APIPromise<Stream<ChatCompletionChunk>> {
+ return this._client.post('/chat/completions', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise<ChatCompletion>
+ | APIPromise<Stream<ChatCompletionChunk>>;
+ }
+
+ /**
+ * Get a stored chat completion. Only Chat Completions that have been created with
+ * the `store` parameter set to `true` will be returned.
+ *
+ * @example
+ * ```ts
+ * const chatCompletion =
+ *   await client.chat.completions.retrieve('completion_id');
+ * ```
+ */
+ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise<ChatCompletion> {
+ return this._client.get(`/chat/completions/${completionId}`, options);
+ }
+
+ /**
+ * Modify a stored chat completion. Only Chat Completions that have been created
+ * with the `store` parameter set to `true` can be modified. Currently, the only
+ * supported modification is to update the `metadata` field.
+ *
+ * @example
+ * ```ts
+ * const chatCompletion = await client.chat.completions.update(
+ *   'completion_id',
+ *   { metadata: { foo: 'string' } },
+ * );
+ * ```
+ */
+ update(
+ completionId: string,
+ body: ChatCompletionUpdateParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ChatCompletion> {
+ return this._client.post(`/chat/completions/${completionId}`, { body, ...options });
+ }
+
+ /**
+ * List stored Chat Completions. Only Chat Completions that have been stored with
+ * the `store` parameter set to `true` will be returned.
+ *
+ * @example
+ * ```ts
+ * // Automatically fetches more pages as needed.
+ * for await (const chatCompletion of client.chat.completions.list()) {
+ *   // ...
+ * }
+ * ```
+ */
+ list(
+ query?: ChatCompletionListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ChatCompletionsPage, ChatCompletion>;
+ list(options?: Core.RequestOptions): Core.PagePromise<ChatCompletionsPage, ChatCompletion>;
+ list(
+ query: ChatCompletionListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ChatCompletionsPage, ChatCompletion> {
+ if (isRequestOptions(query)) {
+ return this.list({}, query);
+ }
+ return this._client.getAPIList('/chat/completions', ChatCompletionsPage, { query, ...options });
+ }
+
+ /**
+ * Delete a stored chat completion. Only Chat Completions that have been created
+ * with the `store` parameter set to `true` can be deleted.
+ *
+ * @example
+ * ```ts
+ * const chatCompletionDeleted =
+ *   await client.chat.completions.del('completion_id');
+ * ```
+ */
+ del(completionId: string, options?: Core.RequestOptions): Core.APIPromise<ChatCompletionDeleted> {
+ return this._client.delete(`/chat/completions/${completionId}`, options);
+ }
+}
+
+export class ChatCompletionsPage extends CursorPage<ChatCompletion> {}
+
+export class ChatCompletionStoreMessagesPage extends CursorPage<ChatCompletionStoreMessage> {}
+
+/**
+ * Represents a chat completion response returned by the model, based on the
+ * provided input.
+ */
+export interface ChatCompletion {
+ /**
+ * A unique identifier for the chat completion.
+ */
+ id: string;
+
+ /**
+ * A list of chat completion choices. Can be more than one if `n` is greater
+ * than 1.
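+ *
+ * For instance (a minimal sketch, where `completion` is a non-streaming response
+ * from `create`), the generated text of the first choice can be read as:
+ *
+ * ```ts
+ * const text = completion.choices[0]?.message.content ?? '';
+ * ```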
+ */
+ choices: Array<ChatCompletion.Choice>;
+
+ /**
+ * The Unix timestamp (in seconds) of when the chat completion was created.
+ */
+ created: number;
+
+ /**
+ * The model used for the chat completion.
+ */
+ model: string;
+
+ /**
+ * The object type, which is always `chat.completion`.
+ */
+ object: 'chat.completion';
+
+ /**
+ * Specifies the latency tier to use for processing the request. This parameter is
+ * relevant for customers subscribed to the scale tier service:
+ *
+ * - If set to 'auto', and the Project is Scale tier enabled, the system will
+ *   utilize scale tier credits until they are exhausted.
+ * - If set to 'auto', and the Project is not Scale tier enabled, the request will
+ *   be processed using the default service tier with a lower uptime SLA and no
+ *   latency guarantee.
+ * - If set to 'default', the request will be processed using the default service
+ *   tier with a lower uptime SLA and no latency guarantee.
+ * - If set to 'flex', the request will be processed with the Flex Processing
+ *   service tier.
+ *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ * - When not set, the default behavior is 'auto'.
+ *
+ * When this parameter is set, the response body will include the `service_tier`
+ * utilized.
+ */
+ service_tier?: 'auto' | 'default' | 'flex' | null;
+
+ /**
+ * This fingerprint represents the backend configuration that the model runs with.
+ *
+ * Can be used in conjunction with the `seed` request parameter to understand when
+ * backend changes have been made that might impact determinism.
+ */
+ system_fingerprint?: string;
+
+ /**
+ * Usage statistics for the completion request.
+ */
+ usage?: CompletionsAPI.CompletionUsage;
+}
+
+export namespace ChatCompletion {
+ export interface Choice {
+ /**
+ * The reason the model stopped generating tokens. This will be `stop` if the model
+ * hit a natural stop point or a provided stop sequence, `length` if the maximum
+ * number of tokens specified in the request was reached, `content_filter` if
+ * content was omitted due to a flag from our content filters, `tool_calls` if the
+ * model called a tool, or `function_call` (deprecated) if the model called a
+ * function.
+ */
+ finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call';
+
+ /**
+ * The index of the choice in the list of choices.
+ */
+ index: number;
+
+ /**
+ * Log probability information for the choice.
+ */
+ logprobs: Choice.Logprobs | null;
+
+ /**
+ * A chat completion message generated by the model.
+ */
+ message: CompletionsCompletionsAPI.ChatCompletionMessage;
+ }
+
+ export namespace Choice {
+ /**
+ * Log probability information for the choice.
+ */
+ export interface Logprobs {
+ /**
+ * A list of message content tokens with log probability information.
+ */
+ content: Array<CompletionsCompletionsAPI.ChatCompletionTokenLogprob> | null;
+
+ /**
+ * A list of message refusal tokens with log probability information.
+ */
+ refusal: Array<CompletionsCompletionsAPI.ChatCompletionTokenLogprob> | null;
+ }
+ }
+}
+
+/**
+ * Messages sent by the model in response to user messages.
+ */
+export interface ChatCompletionAssistantMessageParam {
+ /**
+ * The role of the messages author, in this case `assistant`.
+ */
+ role: 'assistant';
+
+ /**
+ * Data about a previous audio response from the model.
+ * [Learn more](https://platform.openai.com/docs/guides/audio).
+ */
+ audio?: ChatCompletionAssistantMessageParam.Audio | null;
+
+ /**
+ * The contents of the assistant message. Required unless `tool_calls` or
+ * `function_call` is specified.
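+ *
+ * For example (a hypothetical multi-turn payload, not taken from the spec), a
+ * prior assistant turn is replayed back to the model like so:
+ *
+ * ```ts
+ * const messages: ChatCompletionMessageParam[] = [
+ *   { role: 'user', content: 'What is 2 + 2?' },
+ *   { role: 'assistant', content: '2 + 2 equals 4.' },
+ *   { role: 'user', content: 'Now double it.' },
+ * ];
+ * ```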
+ */
+ content?: string | Array<ChatCompletionContentPartText | ChatCompletionContentPartRefusal> | null;
+
+ /**
+ * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a
+ * function that should be called, as generated by the model.
+ */
+ function_call?: ChatCompletionAssistantMessageParam.FunctionCall | null;
+
+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
+
+ /**
+ * The refusal message by the assistant.
+ */
+ refusal?: string | null;
+
+ /**
+ * The tool calls generated by the model, such as function calls.
+ */
+ tool_calls?: Array<ChatCompletionMessageToolCall>;
+}
+
+export namespace ChatCompletionAssistantMessageParam {
+ /**
+ * Data about a previous audio response from the model.
+ * [Learn more](https://platform.openai.com/docs/guides/audio).
+ */
+ export interface Audio {
+ /**
+ * Unique identifier for a previous audio response from the model.
+ */
+ id: string;
+ }
+
+ /**
+ * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a
+ * function that should be called, as generated by the model.
+ */
+ export interface FunctionCall {
+ /**
+ * The arguments to call the function with, as generated by the model in JSON
+ * format. Note that the model does not always generate valid JSON, and may
+ * hallucinate parameters not defined by your function schema. Validate the
+ * arguments in your code before calling your function.
+ */
+ arguments: string;
+
+ /**
+ * The name of the function to call.
+ */
+ name: string;
+ }
+}
+
+/**
+ * If the audio output modality is requested, this object contains data about the
+ * audio response from the model.
+ * [Learn more](https://platform.openai.com/docs/guides/audio).
+ */
+export interface ChatCompletionAudio {
+ /**
+ * Unique identifier for this audio response.
+ */
+ id: string;
+
+ /**
+ * Base64 encoded audio bytes generated by the model, in the format specified in
+ * the request.
+ */
+ data: string;
+
+ /**
+ * The Unix timestamp (in seconds) for when this audio response will no longer be
+ * accessible on the server for use in multi-turn conversations.
+ */
+ expires_at: number;
+
+ /**
+ * Transcript of the audio generated by the model.
+ */
+ transcript: string;
+}
+
+/**
+ * Parameters for audio output. Required when audio output is requested with
+ * `modalities: ["audio"]`.
+ * [Learn more](https://platform.openai.com/docs/guides/audio).
+ */
+export interface ChatCompletionAudioParam {
+ /**
+ * Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`,
+ * or `pcm16`.
+ */
+ format: 'wav' | 'aac' | 'mp3' | 'flac' | 'opus' | 'pcm16';
+
+ /**
+ * The voice the model uses to respond. Supported voices are `alloy`, `ash`,
+ * `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
+ */
+ voice:
+ | (string & {})
+ | 'alloy'
+ | 'ash'
+ | 'ballad'
+ | 'coral'
+ | 'echo'
+ | 'fable'
+ | 'onyx'
+ | 'nova'
+ | 'sage'
+ | 'shimmer'
+ | 'verse';
+}
+
+/**
+ * Represents a streamed chunk of a chat completion response returned by the model,
+ * based on the provided input.
+ * [Learn more](https://platform.openai.com/docs/guides/streaming-responses).
+ */
+export interface ChatCompletionChunk {
+ /**
+ * A unique identifier for the chat completion. Each chunk has the same ID.
+ */
+ id: string;
+
+ /**
+ * A list of chat completion choices. Can contain more than one element if `n` is
+ * greater than 1. Can also be empty for the last chunk if you set
+ * `stream_options: {"include_usage": true}`.
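+ *
+ * As a rough sketch (assuming `client` is an initialized `OpenAI` instance), a
+ * consumer typically concatenates `delta.content` of the first choice across
+ * chunks:
+ *
+ * ```ts
+ * const stream = await client.chat.completions.create({
+ *   model: 'gpt-4o',
+ *   messages: [{ role: 'user', content: 'Say hello' }],
+ *   stream: true,
+ * });
+ * let text = '';
+ * for await (const chunk of stream) {
+ *   text += chunk.choices[0]?.delta?.content ?? '';
+ * }
+ * ```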
+ */
+ choices: Array<ChatCompletionChunk.Choice>;
+
+ /**
+ * The Unix timestamp (in seconds) of when the chat completion was created. Each
+ * chunk has the same timestamp.
+ */
+ created: number;
+
+ /**
+ * The model used to generate the completion.
+ */
+ model: string;
+
+ /**
+ * The object type, which is always `chat.completion.chunk`.
+ */
+ object: 'chat.completion.chunk';
+
+ /**
+ * Specifies the latency tier to use for processing the request. This parameter is
+ * relevant for customers subscribed to the scale tier service:
+ *
+ * - If set to 'auto', and the Project is Scale tier enabled, the system will
+ *   utilize scale tier credits until they are exhausted.
+ * - If set to 'auto', and the Project is not Scale tier enabled, the request will
+ *   be processed using the default service tier with a lower uptime SLA and no
+ *   latency guarantee.
+ * - If set to 'default', the request will be processed using the default service
+ *   tier with a lower uptime SLA and no latency guarantee.
+ * - If set to 'flex', the request will be processed with the Flex Processing
+ *   service tier.
+ *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ * - When not set, the default behavior is 'auto'.
+ *
+ * When this parameter is set, the response body will include the `service_tier`
+ * utilized.
+ */
+ service_tier?: 'auto' | 'default' | 'flex' | null;
+
+ /**
+ * This fingerprint represents the backend configuration that the model runs with.
+ * Can be used in conjunction with the `seed` request parameter to understand when
+ * backend changes have been made that might impact determinism.
+ */
+ system_fingerprint?: string;
+
+ /**
+ * An optional field that will only be present when you set
+ * `stream_options: {"include_usage": true}` in your request. When present, it
+ * contains a null value **except for the last chunk** which contains the token
+ * usage statistics for the entire request.
+ *
+ * **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+ * final usage chunk which contains the total token usage for the request.
+ */
+ usage?: CompletionsAPI.CompletionUsage | null;
+}
+
+export namespace ChatCompletionChunk {
+ export interface Choice {
+ /**
+ * A chat completion delta generated by streamed model responses.
+ */
+ delta: Choice.Delta;
+
+ /**
+ * The reason the model stopped generating tokens. This will be `stop` if the model
+ * hit a natural stop point or a provided stop sequence, `length` if the maximum
+ * number of tokens specified in the request was reached, `content_filter` if
+ * content was omitted due to a flag from our content filters, `tool_calls` if the
+ * model called a tool, or `function_call` (deprecated) if the model called a
+ * function.
+ */
+ finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;
+
+ /**
+ * The index of the choice in the list of choices.
+ */
+ index: number;
+
+ /**
+ * Log probability information for the choice.
+ */
+ logprobs?: Choice.Logprobs | null;
+ }
+
+ export namespace Choice {
+ /**
+ * A chat completion delta generated by streamed model responses.
+ */
+ export interface Delta {
+ /**
+ * The contents of the chunk message.
+ */
+ content?: string | null;
+
+ /**
+ * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a
+ * function that should be called, as generated by the model.
+ */
+ function_call?: Delta.FunctionCall;
+
+ /**
+ * The refusal message generated by the model.
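+ *
+ * A stream consumer might surface refusal deltas separately from normal content,
+ * e.g. (sketch, with `stream` as above):
+ *
+ * ```ts
+ * for await (const chunk of stream) {
+ *   const delta = chunk.choices[0]?.delta;
+ *   if (delta?.refusal) console.error(delta.refusal);
+ *   else process.stdout.write(delta?.content ?? '');
+ * }
+ * ```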
+ */ + refusal?: string | null; + + /** + * The role of the author of this message. + */ + role?: 'developer' | 'system' | 'user' | 'assistant' | 'tool'; + + tool_calls?: Array; + } + + export namespace Delta { + /** + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. + */ + export interface FunctionCall { + /** + * The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may + * hallucinate parameters not defined by your function schema. Validate the + * arguments in your code before calling your function. + */ + arguments?: string; + + /** + * The name of the function to call. + */ + name?: string; + } + + export interface ToolCall { + index: number; + + /** + * The ID of the tool call. + */ + id?: string; + + function?: ToolCall.Function; + + /** + * The type of the tool. Currently, only `function` is supported. + */ + type?: 'function'; + } + + export namespace ToolCall { + export interface Function { + /** + * The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may + * hallucinate parameters not defined by your function schema. Validate the + * arguments in your code before calling your function. + */ + arguments?: string; + + /** + * The name of the function to call. + */ + name?: string; + } + } + } + + /** + * Log probability information for the choice. + */ + export interface Logprobs { + /** + * A list of message content tokens with log probability information. + */ + content: Array | null; + + /** + * A list of message refusal tokens with log probability information. + */ + refusal: Array | null; + } + } +} + +/** + * Learn about + * [text inputs](https://platform.openai.com/docs/guides/text-generation). + */ +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionContentPartInputAudio + | ChatCompletionContentPart.File; + +export namespace ChatCompletionContentPart { + /** + * Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + * generation. + */ + export interface File { + file: File.File; + + /** + * The type of the content part. Always `file`. + */ + type: 'file'; + } + + export namespace File { + export interface File { + /** + * The base64 encoded file data, used when passing the file to the model as a + * string. + */ + file_data?: string; + + /** + * The ID of an uploaded file to use as input. + */ + file_id?: string; + + /** + * The name of the file, used when passing the file to the model as a string. + */ + filename?: string; + } + } +} + +/** + * Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + */ +export interface ChatCompletionContentPartImage { + image_url: ChatCompletionContentPartImage.ImageURL; + + /** + * The type of the content part. + */ + type: 'image_url'; +} + +export namespace ChatCompletionContentPartImage { + export interface ImageURL { + /** + * Either a URL of the image or the base64 encoded image data. + */ + url: string; + + /** + * Specifies the detail level of the image. Learn more in the + * [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + */ + detail?: 'auto' | 'low' | 'high'; + } +} + +/** + * Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). 
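+ *
+ * For example (a sketch assuming an audio-capable model such as
+ * `gpt-4o-audio-preview` and a local WAV file), audio is passed as a base64
+ * string:
+ *
+ * ```ts
+ * import fs from 'node:fs';
+ *
+ * const part: ChatCompletionContentPartInputAudio = {
+ *   type: 'input_audio',
+ *   input_audio: { data: fs.readFileSync('speech.wav').toString('base64'), format: 'wav' },
+ * };
+ * ```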
+ */ +export interface ChatCompletionContentPartInputAudio { + input_audio: ChatCompletionContentPartInputAudio.InputAudio; + + /** + * The type of the content part. Always `input_audio`. + */ + type: 'input_audio'; +} + +export namespace ChatCompletionContentPartInputAudio { + export interface InputAudio { + /** + * Base64 encoded audio data. + */ + data: string; + + /** + * The format of the encoded audio data. Currently supports "wav" and "mp3". + */ + format: 'wav' | 'mp3'; + } +} + +export interface ChatCompletionContentPartRefusal { + /** + * The refusal message generated by the model. + */ + refusal: string; + + /** + * The type of the content part. + */ + type: 'refusal'; +} + +/** + * Learn about + * [text inputs](https://platform.openai.com/docs/guides/text-generation). + */ +export interface ChatCompletionContentPartText { + /** + * The text content. + */ + text: string; + + /** + * The type of the content part. + */ + type: 'text'; +} + +export interface ChatCompletionDeleted { + /** + * The ID of the chat completion that was deleted. + */ + id: string; + + /** + * Whether the chat completion was deleted. + */ + deleted: boolean; + + /** + * The type of object being deleted. + */ + object: 'chat.completion.deleted'; +} + +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, `developer` messages + * replace the previous `system` messages. + */ +export interface ChatCompletionDeveloperMessageParam { + /** + * The contents of the developer message. + */ + content: string | Array; + + /** + * The role of the messages author, in this case `developer`. + */ + role: 'developer'; + + /** + * An optional name for the participant. Provides the model information to + * differentiate between participants of the same role. + */ + name?: string; +} + +/** + * Specifying a particular function via `{"name": "my_function"}` forces the model + * to call that function. + */ +export interface ChatCompletionFunctionCallOption { + /** + * The name of the function to call. + */ + name: string; +} + +/** + * @deprecated + */ +export interface ChatCompletionFunctionMessageParam { + /** + * The contents of the function message. + */ + content: string | null; + + /** + * The name of the function to call. + */ + name: string; + + /** + * The role of the messages author, in this case `function`. + */ + role: 'function'; +} + +/** + * A chat completion message generated by the model. + */ +export interface ChatCompletionMessage { + /** + * The contents of the message. + */ + content: string | null; + + /** + * The refusal message generated by the model. + */ + refusal: string | null; + + /** + * The role of the author of this message. + */ + role: 'assistant'; + + /** + * Annotations for the message, when applicable, as when using the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + annotations?: Array; + + /** + * If the audio output modality is requested, this object contains data about the + * audio response from the model. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + audio?: ChatCompletionAudio | null; + + /** + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. + */ + function_call?: ChatCompletionMessage.FunctionCall | null; + + /** + * The tool calls generated by the model, such as function calls. 
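+ *
+ * A typical round trip (sketch; `messages` is the running conversation and
+ * `runTool` is a hypothetical local dispatcher) executes each call and replies
+ * with `role: 'tool'` messages:
+ *
+ * ```ts
+ * const msg = completion.choices[0]!.message;
+ * messages.push(msg);
+ * for (const call of msg.tool_calls ?? []) {
+ *   const result = await runTool(call.function.name, JSON.parse(call.function.arguments));
+ *   messages.push({ role: 'tool', tool_call_id: call.id, content: JSON.stringify(result) });
+ * }
+ * ```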
+ */ + tool_calls?: Array; +} + +export namespace ChatCompletionMessage { + /** + * A URL citation when using web search. + */ + export interface Annotation { + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * A URL citation when using web search. + */ + url_citation: Annotation.URLCitation; + } + + export namespace Annotation { + /** + * A URL citation when using web search. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The URL of the web resource. + */ + url: string; + } + } + + /** + * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a + * function that should be called, as generated by the model. + */ + export interface FunctionCall { + /** + * The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may + * hallucinate parameters not defined by your function schema. Validate the + * arguments in your code before calling your function. + */ + arguments: string; + + /** + * The name of the function to call. + */ + name: string; + } +} + +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, `developer` messages + * replace the previous `system` messages. + */ +export type ChatCompletionMessageParam = + | ChatCompletionDeveloperMessageParam + | ChatCompletionSystemMessageParam + | ChatCompletionUserMessageParam + | ChatCompletionAssistantMessageParam + | ChatCompletionToolMessageParam + | ChatCompletionFunctionMessageParam; + +export interface ChatCompletionMessageToolCall { + /** + * The ID of the tool call. + */ + id: string; + + /** + * The function that the model called. + */ + function: ChatCompletionMessageToolCall.Function; + + /** + * The type of the tool. Currently, only `function` is supported. + */ + type: 'function'; +} + +export namespace ChatCompletionMessageToolCall { + /** + * The function that the model called. + */ + export interface Function { + /** + * The arguments to call the function with, as generated by the model in JSON + * format. Note that the model does not always generate valid JSON, and may + * hallucinate parameters not defined by your function schema. Validate the + * arguments in your code before calling your function. + */ + arguments: string; + + /** + * The name of the function to call. + */ + name: string; + } +} + +export type ChatCompletionModality = 'text' | 'audio'; + +/** + * Specifies a tool the model should use. Use to force the model to call a specific + * function. + */ +export interface ChatCompletionNamedToolChoice { + function: ChatCompletionNamedToolChoice.Function; + + /** + * The type of the tool. Currently, only `function` is supported. + */ + type: 'function'; +} + +export namespace ChatCompletionNamedToolChoice { + export interface Function { + /** + * The name of the function to call. + */ + name: string; + } +} + +/** + * Static predicted output content, such as the content of a text file that is + * being regenerated. + */ +export interface ChatCompletionPredictionContent { + /** + * The content that should be matched when generating a model response. 
If + * generated tokens would match this content, the entire model response can be + * returned much more quickly. + */ + content: string | Array; + + /** + * The type of the predicted content you want to provide. This type is currently + * always `content`. + */ + type: 'content'; +} + +/** + * The role of the author of a message + */ +export type ChatCompletionRole = 'developer' | 'system' | 'user' | 'assistant' | 'tool' | 'function'; + +/** + * A chat completion message generated by the model. + */ +export interface ChatCompletionStoreMessage extends ChatCompletionMessage { + /** + * The identifier of the chat message. + */ + id: string; +} + +/** + * Options for streaming response. Only set this when you set `stream: true`. + */ +export interface ChatCompletionStreamOptions { + /** + * If set, an additional chunk will be streamed before the `data: [DONE]` message. + * The `usage` field on this chunk shows the token usage statistics for the entire + * request, and the `choices` field will always be an empty array. + * + * All other chunks will also include a `usage` field, but with a null value. + * **NOTE:** If the stream is interrupted, you may not receive the final usage + * chunk which contains the total token usage for the request. + */ + include_usage?: boolean; +} + +/** + * Developer-provided instructions that the model should follow, regardless of + * messages sent by the user. With o1 models and newer, use `developer` messages + * for this purpose instead. + */ +export interface ChatCompletionSystemMessageParam { + /** + * The contents of the system message. + */ + content: string | Array; + + /** + * The role of the messages author, in this case `system`. + */ + role: 'system'; + + /** + * An optional name for the participant. Provides the model information to + * differentiate between participants of the same role. + */ + name?: string; +} + +export interface ChatCompletionTokenLogprob { + /** + * The token. + */ + token: string; + + /** + * A list of integers representing the UTF-8 bytes representation of the token. + * Useful in instances where characters are represented by multiple tokens and + * their byte representations must be combined to generate the correct text + * representation. Can be `null` if there is no bytes representation for the token. + */ + bytes: Array | null; + + /** + * The log probability of this token, if it is within the top 20 most likely + * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + * unlikely. + */ + logprob: number; + + /** + * List of the most likely tokens and their log probability, at this token + * position. In rare cases, there may be fewer than the number of requested + * `top_logprobs` returned. + */ + top_logprobs: Array; +} + +export namespace ChatCompletionTokenLogprob { + export interface TopLogprob { + /** + * The token. + */ + token: string; + + /** + * A list of integers representing the UTF-8 bytes representation of the token. + * Useful in instances where characters are represented by multiple tokens and + * their byte representations must be combined to generate the correct text + * representation. Can be `null` if there is no bytes representation for the token. + */ + bytes: Array | null; + + /** + * The log probability of this token, if it is within the top 20 most likely + * tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + * unlikely. 
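+ *
+ * Since this is a natural-log probability, a linear probability can be recovered
+ * with `Math.exp` (sketch, given a `logprob` value):
+ *
+ * ```ts
+ * const probability = Math.exp(logprob); // e.g. Math.exp(-0.105) ≈ 0.9
+ * ```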
+ */ + logprob: number; + } +} + +export interface ChatCompletionTool { + function: Shared.FunctionDefinition; + + /** + * The type of the tool. Currently, only `function` is supported. + */ + type: 'function'; +} + +/** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tool and instead generates a message. `auto` means the model can + * pick between generating a message or calling one or more tools. `required` means + * the model must call one or more tools. Specifying a particular tool via + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + * + * `none` is the default when no tools are present. `auto` is the default if tools + * are present. + */ +export type ChatCompletionToolChoiceOption = 'none' | 'auto' | 'required' | ChatCompletionNamedToolChoice; + +export interface ChatCompletionToolMessageParam { + /** + * The contents of the tool message. + */ + content: string | Array; + + /** + * The role of the messages author, in this case `tool`. + */ + role: 'tool'; + + /** + * Tool call that this message is responding to. + */ + tool_call_id: string; +} + +/** + * Messages sent by an end user, containing prompts or additional context + * information. + */ +export interface ChatCompletionUserMessageParam { + /** + * The contents of the user message. + */ + content: string | Array; + + /** + * The role of the messages author, in this case `user`. + */ + role: 'user'; + + /** + * An optional name for the participant. Provides the model information to + * differentiate between participants of the same role. + */ + name?: string; +} + +/** + * @deprecated ChatCompletionMessageParam should be used instead + */ +export type CreateChatCompletionRequestMessage = ChatCompletionMessageParam; + +export type ChatCompletionReasoningEffort = Shared.ReasoningEffort | null; + +export type ChatCompletionCreateParams = + | ChatCompletionCreateParamsNonStreaming + | ChatCompletionCreateParamsStreaming; + +export interface ChatCompletionCreateParamsBase { + /** + * A list of messages comprising the conversation so far. Depending on the + * [model](https://platform.openai.com/docs/models) you use, different message + * types (modalities) are supported, like + * [text](https://platform.openai.com/docs/guides/text-generation), + * [images](https://platform.openai.com/docs/guides/vision), and + * [audio](https://platform.openai.com/docs/guides/audio). + */ + messages: Array; + + /** + * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. + */ + model: (string & {}) | Shared.ChatModel; + + /** + * Parameters for audio output. Required when audio output is requested with + * `modalities: ["audio"]`. + * [Learn more](https://platform.openai.com/docs/guides/audio). + */ + audio?: ChatCompletionAudioParam | null; + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their + * existing frequency in the text so far, decreasing the model's likelihood to + * repeat the same line verbatim. + */ + frequency_penalty?: number | null; + + /** + * @deprecated Deprecated in favor of `tool_choice`. + * + * Controls which (if any) function is called by the model. + * + * `none` means the model will not call a function and instead generates a message. 
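+ *
+ * New code should prefer `tools` with `tool_choice`; forcing a single function
+ * looks like this (sketch, assuming `messages` and `tools` are defined):
+ *
+ * ```ts
+ * const completion = await client.chat.completions.create({
+ *   model: 'gpt-4o',
+ *   messages,
+ *   tools,
+ *   tool_choice: { type: 'function', function: { name: 'my_function' } },
+ * });
+ * ```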
+ * + * `auto` means the model can pick between generating a message or calling a + * function. + * + * Specifying a particular function via `{"name": "my_function"}` forces the model + * to call that function. + * + * `none` is the default when no functions are present. `auto` is the default if + * functions are present. + */ + function_call?: 'none' | 'auto' | ChatCompletionFunctionCallOption; + + /** + * @deprecated Deprecated in favor of `tools`. + * + * A list of functions the model may generate JSON inputs for. + */ + functions?: Array<ChatCompletionCreateParams.Function>; + + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * Accepts a JSON object that maps tokens (specified by their token ID in the + * tokenizer) to an associated bias value from -100 to 100. Mathematically, the + * bias is added to the logits generated by the model prior to sampling. The exact + * effect will vary per model, but values between -1 and 1 should decrease or + * increase likelihood of selection; values like -100 or 100 should result in a ban + * or exclusive selection of the relevant token. + */ + logit_bias?: Record<string, number> | null; + + /** + * Whether to return log probabilities of the output tokens or not. If true, + * returns the log probabilities of each output token returned in the `content` of + * `message`. + */ + logprobs?: boolean | null; + + /** + * An upper bound for the number of tokens that can be generated for a completion, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_completion_tokens?: number | null; + + /** + * @deprecated The maximum number of [tokens](/tokenizer) that can be generated in + * the chat completion. This value can be used to control + * [costs](https://openai.com/api/pricing/) for text generated via API. + * + * This value is now deprecated in favor of `max_completion_tokens`, and is not + * compatible with + * [o-series models](https://platform.openai.com/docs/guides/reasoning). + */ + max_tokens?: number | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * Output types that you would like the model to generate. Most models are capable + * of generating text, which is the default: + * + * `["text"]` + * + * The `gpt-4o-audio-preview` model can also be used to + * [generate audio](https://platform.openai.com/docs/guides/audio). To request that + * this model generate both text and audio responses, you can use: + * + * `["text", "audio"]` + */ + modalities?: Array<'text' | 'audio'> | null; + + /** + * How many chat completion choices to generate for each input message. Note that + * you will be charged based on the number of generated tokens across all of the + * choices. Keep `n` as `1` to minimize costs. + */ + n?: number | null; + + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + parallel_tool_calls?: boolean; + + /** + * Static predicted output content, such as the content of a text file that is + * being regenerated.
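The `prediction` field above is the Predicted Outputs feature; a sketch of the round trip (file contents and model are illustrative, and a model with Predicted Outputs support is assumed):

```ts
const existingCode = 'function add(a, b) { return a + b; }'; // illustrative input

const completion = await client.chat.completions.create({
  model: 'gpt-4o', // illustrative; assumes Predicted Outputs support
  messages: [{ role: 'user', content: 'Rename `add` to `sum` and print the whole file.' }],
  prediction: { type: 'content', content: existingCode },
});

// Spans that match the prediction are returned quickly; see the usage breakdown.
console.log(completion.usage?.completion_tokens_details?.accepted_prediction_tokens);
```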
+ */ + prediction?: ChatCompletionPredictionContent | null; + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on + * whether they appear in the text so far, increasing the model's likelihood to + * talk about new topics. + */ + presence_penalty?: number | null; + + /** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * An object specifying the format that the model must output. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which ensures the model will match your supplied JSON schema. Learn more + * in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. + */ + response_format?: + | Shared.ResponseFormatText + | Shared.ResponseFormatJSONSchema + | Shared.ResponseFormatJSONObject; + + /** + * This feature is in Beta. If specified, our system will make a best effort to + * sample deterministically, such that repeated requests with the same `seed` and + * parameters should return the same result. Determinism is not guaranteed, and you + * should refer to the `system_fingerprint` response parameter to monitor changes + * in the backend. + */ + seed?: number | null; + + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * + * - If set to 'auto', and the Project is Scale tier enabled, the system will + * utilize scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will + * be processed using the default service tier with a lower uptime SLA and no + * latency guarantee. + * - If set to 'default', the request will be processed using the default service + * tier with a lower uptime SLA and no latency guarantee. + * - If set to 'flex', the request will be processed with the Flex Processing + * service tier. + * [Learn more](https://platform.openai.com/docs/guides/flex-processing). + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. + */ + service_tier?: 'auto' | 'default' | 'flex' | null; + + /** + * Not supported with latest reasoning models `o3` and `o4-mini`. + * + * Up to 4 sequences where the API will stop generating further tokens. The + * returned text will not contain the stop sequence. + */ + stop?: string | null | Array<string>; + + /** + * Whether or not to store the output of this chat completion request for use in + * our [model distillation](https://platform.openai.com/docs/guides/distillation) + * or [evals](https://platform.openai.com/docs/guides/evals) products. + */ + store?: boolean | null; + + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
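The `response_format` union above is easiest to read alongside a concrete Structured Outputs request (schema and model are illustrative):

```ts
const completion = await client.chat.completions.create({
  model: 'gpt-4o-2024-08-06', // illustrative; assumes Structured Outputs support
  messages: [{ role: 'user', content: 'Extract the person: "Jane is 31."' }],
  response_format: {
    type: 'json_schema',
    json_schema: {
      name: 'person',
      strict: true,
      schema: {
        type: 'object',
        properties: { name: { type: 'string' }, age: { type: 'integer' } },
        required: ['name', 'age'],
        additionalProperties: false,
      },
    },
  },
});

// With `strict: true` the message content is guaranteed to parse against the schema.
const person = JSON.parse(completion.choices[0].message.content ?? '{}');
```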
+ * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. + */ + stream?: boolean | null; + + /** + * Options for streaming response. Only set this when you set `stream: true`. + */ + stream_options?: ChatCompletionStreamOptions | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature?: number | null; + + /** + * Controls which (if any) tool is called by the model. `none` means the model will + * not call any tool and instead generates a message. `auto` means the model can + * pick between generating a message or calling one or more tools. `required` means + * the model must call one or more tools. Specifying a particular tool via + * `{"type": "function", "function": {"name": "my_function"}}` forces the model to + * call that tool. + * + * `none` is the default when no tools are present. `auto` is the default if tools + * are present. + */ + tool_choice?: ChatCompletionToolChoiceOption; + + /** + * A list of tools the model may call. Currently, only functions are supported as a + * tool. Use this to provide a list of functions the model may generate JSON inputs + * for. A max of 128 functions are supported. + */ + tools?: Array<ChatCompletionTool>; + + /** + * An integer between 0 and 20 specifying the number of most likely tokens to + * return at each token position, each with an associated log probability. + * `logprobs` must be set to `true` if this parameter is used. + */ + top_logprobs?: number | null; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; + + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + web_search_options?: ChatCompletionCreateParams.WebSearchOptions; +} + +export namespace ChatCompletionCreateParams { + /** + * @deprecated + */ + export interface Function { + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain + * underscores and dashes, with a maximum length of 64. + */ + name: string; + + /** + * A description of what the function does, used by the model to choose when and + * how to call the function. + */ + description?: string; + + /** + * The parameters the function accepts, described as a JSON Schema object. See the + * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + * and the + * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + * documentation about the format.
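A short sketch tying the `logprobs`/`top_logprobs` params to the `ChatCompletionTokenLogprob` shape defined earlier (model and prompt are illustrative):

```ts
const completion = await client.chat.completions.create({
  model: 'gpt-4o-mini', // illustrative
  messages: [{ role: 'user', content: 'Name one primary color.' }],
  logprobs: true,
  top_logprobs: 3, // requires `logprobs: true`, per the docs above
});

for (const lp of completion.choices[0].logprobs?.content ?? []) {
  console.log(lp.token, lp.logprob, lp.top_logprobs.map((t) => t.token));
}
```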
+ * + * Omitting `parameters` defines a function with an empty parameter list. + */ + parameters?: Shared.FunctionParameters; + } + + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + export interface WebSearchOptions { + /** + * High level guidance for the amount of context window space to use for the + * search. One of `low`, `medium`, or `high`. `medium` is the default. + */ + search_context_size?: 'low' | 'medium' | 'high'; + + /** + * Approximate location parameters for the search. + */ + user_location?: WebSearchOptions.UserLocation | null; + } + + export namespace WebSearchOptions { + /** + * Approximate location parameters for the search. + */ + export interface UserLocation { + /** + * Approximate location parameters for the search. + */ + approximate: UserLocation.Approximate; + + /** + * The type of location approximation. Always `approximate`. + */ + type: 'approximate'; + } + + export namespace UserLocation { + /** + * Approximate location parameters for the search. + */ + export interface Approximate { + /** + * Free text input for the city of the user, e.g. `San Francisco`. + */ + city?: string; + + /** + * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + * the user, e.g. `US`. + */ + country?: string; + + /** + * Free text input for the region of the user, e.g. `California`. + */ + region?: string; + + /** + * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + * user, e.g. `America/Los_Angeles`. + */ + timezone?: string; + } + } + } + + export type ChatCompletionCreateParamsNonStreaming = + CompletionsCompletionsAPI.ChatCompletionCreateParamsNonStreaming; + export type ChatCompletionCreateParamsStreaming = + CompletionsCompletionsAPI.ChatCompletionCreateParamsStreaming; +} + +/** + * @deprecated Use ChatCompletionCreateParams instead + */ +export type CompletionCreateParams = ChatCompletionCreateParams; + +export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. + */ + stream?: false | null; +} + +/** + * @deprecated Use ChatCompletionCreateParamsNonStreaming instead + */ +export type CompletionCreateParamsNonStreaming = ChatCompletionCreateParamsNonStreaming; + +export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). 
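Because `WebSearchOptions.UserLocation` nests twice (`user_location.approximate`), a concrete request helps (model and location are illustrative; a web-search-capable model is assumed):

```ts
const completion = await client.chat.completions.create({
  model: 'gpt-4o-search-preview', // illustrative; assumes web search support
  messages: [{ role: 'user', content: 'Any concerts near me this weekend?' }],
  web_search_options: {
    search_context_size: 'low',
    user_location: {
      type: 'approximate',
      approximate: { city: 'San Francisco', country: 'US', timezone: 'America/Los_Angeles' },
    },
  },
});
```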
+ * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. + */ + stream: true; +} + +/** + * @deprecated Use ChatCompletionCreateParamsStreaming instead + */ +export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreaming; + +export interface ChatCompletionUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; +} + +/** + * @deprecated Use ChatCompletionUpdateParams instead + */ +export type CompletionUpdateParams = ChatCompletionUpdateParams; + +export interface ChatCompletionListParams extends CursorPageParams { + /** + * A list of metadata keys to filter the Chat Completions by. Example: + * + * `metadata[key1]=value1&metadata[key2]=value2` + */ + metadata?: Shared.Metadata | null; + + /** + * The model used to generate the Chat Completions. + */ + model?: string; + + /** + * Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + * `desc` for descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; +} + +/** + * @deprecated Use ChatCompletionListParams instead + */ +export type CompletionListParams = ChatCompletionListParams; + +Completions.ChatCompletionsPage = ChatCompletionsPage; +Completions.Messages = Messages; + +export declare namespace Completions { + export { + type ChatCompletion as ChatCompletion, + type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, + type ChatCompletionAudio as ChatCompletionAudio, + type ChatCompletionAudioParam as ChatCompletionAudioParam, + type ChatCompletionChunk as ChatCompletionChunk, + type ChatCompletionContentPart as ChatCompletionContentPart, + type ChatCompletionContentPartImage as ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText as ChatCompletionContentPartText, + type ChatCompletionDeleted as ChatCompletionDeleted, + type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, + type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, + type ChatCompletionMessage as ChatCompletionMessage, + type ChatCompletionMessageParam as ChatCompletionMessageParam, + type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + type ChatCompletionModality as ChatCompletionModality, + type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent as ChatCompletionPredictionContent, + type ChatCompletionRole as ChatCompletionRole, + type ChatCompletionStoreMessage as ChatCompletionStoreMessage, + type ChatCompletionStreamOptions as ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, + type ChatCompletionTool as 
ChatCompletionTool, + type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, + ChatCompletionsPage as ChatCompletionsPage, + type ChatCompletionCreateParams as ChatCompletionCreateParams, + type CompletionCreateParams as CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming as ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams as ChatCompletionUpdateParams, + type CompletionUpdateParams as CompletionUpdateParams, + type ChatCompletionListParams as ChatCompletionListParams, + type CompletionListParams as CompletionListParams, + }; + + export { Messages as Messages, type MessageListParams as MessageListParams }; +} diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts new file mode 100644 index 000000000..994d6f880 --- /dev/null +++ b/src/resources/chat/completions/index.ts @@ -0,0 +1,48 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + ChatCompletionStoreMessagesPage, + ChatCompletionsPage, + Completions, + type ChatCompletion, + type ChatCompletionAssistantMessageParam, + type ChatCompletionAudio, + type ChatCompletionAudioParam, + type ChatCompletionChunk, + type ChatCompletionContentPart, + type ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText, + type ChatCompletionDeleted, + type ChatCompletionDeveloperMessageParam, + type ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam, + type ChatCompletionMessage, + type ChatCompletionMessageParam, + type ChatCompletionMessageToolCall, + type ChatCompletionModality, + type ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent, + type ChatCompletionRole, + type ChatCompletionStoreMessage, + type ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob, + type ChatCompletionTool, + type ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams, + type CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams, + type CompletionUpdateParams, + type ChatCompletionListParams, + type CompletionListParams, +} from './completions'; +export { Messages, type MessageListParams } from './messages'; diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts new file mode 100644 index 000000000..ab3eb73f6 --- /dev/null +++ b/src/resources/chat/completions/messages.ts @@ -0,0 +1,62 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
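The `ChatCompletionListParams` exported above pair with the new `list` method on `chat.completions`; a sketch of filtering stored completions by metadata (filter values are illustrative):

```ts
// Assumes completions were created with `store: true` and matching metadata.
for await (const completion of client.chat.completions.list({
  model: 'gpt-4o-mini', // illustrative filters
  metadata: { usecase: 'chatbot' },
  order: 'desc',
})) {
  console.log(completion.id);
}
```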
+ +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import * as CompletionsAPI from './completions'; +import { ChatCompletionStoreMessagesPage } from './completions'; +import { type CursorPageParams } from '../../../pagination'; + +export class Messages extends APIResource { + /** + * Get the messages in a stored chat completion. Only Chat Completions that have + * been created with the `store` parameter set to `true` will be returned. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const chatCompletionStoreMessage of client.chat.completions.messages.list( + * 'completion_id', + * )) { + * // ... + * } + * ``` + */ + list( + completionId: string, + query?: MessageListParams, + options?: Core.RequestOptions, + ): Core.PagePromise<ChatCompletionStoreMessagesPage, CompletionsAPI.ChatCompletionStoreMessage>; + list( + completionId: string, + options?: Core.RequestOptions, + ): Core.PagePromise<ChatCompletionStoreMessagesPage, CompletionsAPI.ChatCompletionStoreMessage>; + list( + completionId: string, + query: MessageListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise<ChatCompletionStoreMessagesPage, CompletionsAPI.ChatCompletionStoreMessage> { + if (isRequestOptions(query)) { + return this.list(completionId, {}, query); + } + return this._client.getAPIList( + `/chat/completions/${completionId}/messages`, + ChatCompletionStoreMessagesPage, + { query, ...options }, + ); + } +} + +export interface MessageListParams extends CursorPageParams { + /** + * Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + * for descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; +} + +export declare namespace Messages { + export { type MessageListParams as MessageListParams }; +} + +export { ChatCompletionStoreMessagesPage }; diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index 748770948..62ca758e0 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,34 +1,48 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
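Beyond the `for await` form shown in the `@example` above, the returned `CursorPage` can be walked manually (completion ID is illustrative):

```ts
let page = await client.chat.completions.messages.list('chatcmpl_abc123', { limit: 2 });
console.log(page.data.map((m) => m.id)); // one page of ChatCompletionStoreMessage

while (page.hasNextPage()) {
  page = await page.getNextPage();
  console.log(page.data.map((m) => m.id));
}
```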
+export { Chat } from './chat'; export { - ChatCompletion, - ChatCompletionAssistantMessageParam, - ChatCompletionChunk, - ChatCompletionContentPart, - ChatCompletionContentPartImage, - ChatCompletionContentPartRefusal, - ChatCompletionContentPartText, - ChatCompletionFunctionCallOption, - ChatCompletionFunctionMessageParam, - ChatCompletionMessage, - ChatCompletionMessageParam, - ChatCompletionMessageToolCall, - ChatCompletionNamedToolChoice, - ChatCompletionRole, - ChatCompletionStreamOptions, - ChatCompletionSystemMessageParam, - ChatCompletionTokenLogprob, - ChatCompletionTool, - ChatCompletionToolChoiceOption, - ChatCompletionToolMessageParam, - ChatCompletionUserMessageParam, - CreateChatCompletionRequestMessage, - ChatCompletionCreateParams, - CompletionCreateParams, - ChatCompletionCreateParamsNonStreaming, - CompletionCreateParamsNonStreaming, - ChatCompletionCreateParamsStreaming, - CompletionCreateParamsStreaming, + ChatCompletionStoreMessagesPage, + ChatCompletionsPage, Completions, -} from './completions'; -export { ChatModel, Chat } from './chat'; + type ChatCompletion, + type ChatCompletionAssistantMessageParam, + type ChatCompletionAudio, + type ChatCompletionAudioParam, + type ChatCompletionChunk, + type ChatCompletionContentPart, + type ChatCompletionContentPartImage, + type ChatCompletionContentPartInputAudio, + type ChatCompletionContentPartRefusal, + type ChatCompletionContentPartText, + type ChatCompletionDeleted, + type ChatCompletionDeveloperMessageParam, + type ChatCompletionFunctionCallOption, + type ChatCompletionFunctionMessageParam, + type ChatCompletionMessage, + type ChatCompletionMessageParam, + type ChatCompletionMessageToolCall, + type ChatCompletionModality, + type ChatCompletionNamedToolChoice, + type ChatCompletionPredictionContent, + type ChatCompletionRole, + type ChatCompletionStoreMessage, + type ChatCompletionStreamOptions, + type ChatCompletionSystemMessageParam, + type ChatCompletionTokenLogprob, + type ChatCompletionTool, + type ChatCompletionToolChoiceOption, + type ChatCompletionToolMessageParam, + type ChatCompletionUserMessageParam, + type CreateChatCompletionRequestMessage, + type ChatCompletionCreateParams, + type CompletionCreateParams, + type ChatCompletionCreateParamsNonStreaming, + type CompletionCreateParamsNonStreaming, + type ChatCompletionCreateParamsStreaming, + type CompletionCreateParamsStreaming, + type ChatCompletionUpdateParams, + type CompletionUpdateParams, + type ChatCompletionListParams, + type CompletionListParams, +} from './completions/index'; diff --git a/src/resources/completions.ts b/src/resources/completions.ts index 7acd5d13f..07cb49ed9 100644 --- a/src/resources/completions.ts +++ b/src/resources/completions.ts @@ -4,12 +4,20 @@ import { APIResource } from '../resource'; import { APIPromise } from '../core'; import * as Core from '../core'; import * as CompletionsAPI from './completions'; -import * as ChatCompletionsAPI from './chat/completions'; +import * as CompletionsCompletionsAPI from './chat/completions/completions'; import { Stream } from '../streaming'; export class Completions extends APIResource { /** * Creates a completion for the provided prompt and parameters. 
+ * + * @example + * ```ts + * const completion = await client.completions.create({ + * model: 'string', + * prompt: 'This is a test.', + * }); + * ``` */ create(body: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; create( @@ -137,6 +145,12 @@ export namespace CompletionUsage { * Breakdown of tokens used in a completion. */ export interface CompletionTokensDetails { + /** + * When using Predicted Outputs, the number of tokens in the prediction that + * appeared in the completion. + */ + accepted_prediction_tokens?: number; + /** * Audio input tokens generated by the model. */ @@ -146,6 +160,14 @@ export namespace CompletionUsage { * Tokens generated by the model for reasoning. */ reasoning_tokens?: number; + + /** + * When using Predicted Outputs, the number of tokens in the prediction that did + * not appear in the completion. However, like reasoning tokens, these tokens are + * still counted in the total completion tokens for purposes of billing, output, + * and context window limits. + */ + rejected_prediction_tokens?: number; } /** @@ -171,8 +193,8 @@ export interface CompletionCreateParamsBase { * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: (string & {}) | 'gpt-3.5-turbo-instruct' | 'davinci-002' | 'babbage-002'; @@ -209,7 +231,7 @@ export interface CompletionCreateParamsBase { * existing frequency in the text so far, decreasing the model's likelihood to * repeat the same line verbatim. * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ frequency_penalty?: number | null; @@ -264,7 +286,7 @@ export interface CompletionCreateParamsBase { * whether they appear in the text so far, increasing the model's likelihood to * talk about new topics. * - * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) + * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) */ presence_penalty?: number | null; @@ -279,6 +301,8 @@ export interface CompletionCreateParamsBase { seed?: number | null; /** + * Not supported with latest reasoning models `o3` and `o4-mini`. + * * Up to 4 sequences where the API will stop generating further tokens. The * returned text will not contain the stop sequence. */ @@ -297,7 +321,7 @@ export interface CompletionCreateParamsBase { /** * Options for streaming response. Only set this when you set `stream: true`. */ - stream_options?: ChatCompletionsAPI.ChatCompletionStreamOptions | null; + stream_options?: CompletionsCompletionsAPI.ChatCompletionStreamOptions | null; /** * The suffix that comes after a completion of inserted text. @@ -327,7 +351,7 @@ export interface CompletionCreateParamsBase { /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } @@ -361,11 +385,13 @@ export interface CompletionCreateParamsStreaming extends CompletionCreateParamsB stream: true; } -export namespace Completions { - export import Completion = CompletionsAPI.Completion; - export import CompletionChoice = CompletionsAPI.CompletionChoice; - export import CompletionUsage = CompletionsAPI.CompletionUsage; - export import CompletionCreateParams = CompletionsAPI.CompletionCreateParams; - export import CompletionCreateParamsNonStreaming = CompletionsAPI.CompletionCreateParamsNonStreaming; - export import CompletionCreateParamsStreaming = CompletionsAPI.CompletionCreateParamsStreaming; +export declare namespace Completions { + export { + type Completion as Completion, + type CompletionChoice as CompletionChoice, + type CompletionUsage as CompletionUsage, + type CompletionCreateParams as CompletionCreateParams, + type CompletionCreateParamsNonStreaming as CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming as CompletionCreateParamsStreaming, + }; } diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts index 6d8e670a7..fb02a7654 100644 --- a/src/resources/embeddings.ts +++ b/src/resources/embeddings.ts @@ -2,17 +2,63 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as EmbeddingsAPI from './embeddings'; export class Embeddings extends APIResource { /** * Creates an embedding vector representing the input text. + * + * @example + * ```ts + * const createEmbeddingResponse = + * await client.embeddings.create({ + * input: 'The quick brown fox jumped over the lazy dog', + * model: 'text-embedding-3-small', + * }); + * ``` */ create( body: EmbeddingCreateParams, - options?: Core.RequestOptions, + options?: Core.RequestOptions, ): Core.APIPromise<CreateEmbeddingResponse> { - return this._client.post('/embeddings', { body, ...options }); + const hasUserProvidedEncodingFormat = !!body.encoding_format; + // No encoding_format specified, defaulting to base64 for performance reasons + // See https://github.com/openai/openai-node/pull/1312 + let encoding_format: EmbeddingCreateParams['encoding_format'] = + hasUserProvidedEncodingFormat ? body.encoding_format : 'base64'; + + if (hasUserProvidedEncodingFormat) { + Core.debug('Request', 'User defined encoding_format:', body.encoding_format); + } + + const response: Core.APIPromise<CreateEmbeddingResponse> = this._client.post('/embeddings', { + body: { + ...body, + encoding_format: encoding_format as EmbeddingCreateParams['encoding_format'], + }, + ...options, + }); + + // if the user specified an encoding_format, return the response as-is + if (hasUserProvidedEncodingFormat) { + return response; + } + + // in this stage, we are sure the user did not specify an encoding_format + // and we defaulted to base64 for performance reasons + // we are sure then that the response is base64 encoded, let's decode it + // the returned result will be a float32 array since this is OpenAI API's default encoding + Core.debug('response', 'Decoding base64 embeddings to float32 array'); + + return (response as Core.APIPromise<CreateEmbeddingResponse>)._thenUnwrap((response) => { + if (response && response.data) { + response.data.forEach((embeddingBase64Obj) => { + const embeddingBase64Str = embeddingBase64Obj.embedding as unknown as string; + embeddingBase64Obj.embedding = Core.toFloat32Array(embeddingBase64Str); + }); + } + + return response; + }); } } @@ -84,10 +130,12 @@ export interface EmbeddingCreateParams { * Input text to embed, encoded as a string or array of tokens. To embed multiple * inputs in a single request, pass an array of strings or array of token arrays. * The input must not exceed the max input tokens for the model (8192 tokens for - `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + all embedding models), cannot be an empty string, and any array must be 2048 * dimensions or less. * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - * for counting tokens. + * for counting tokens. In addition to the per-input token limit, all embedding + * models enforce a maximum of 300,000 tokens summed across all inputs in a single + * request. */ input: string | Array<string> | Array<number> | Array<Array<number>>; @@ -95,8 +143,8 @@ * ID of the model to use. You can use the * [List models](https://platform.openai.com/docs/api-reference/models/list) API to * see all of your available models, or see our - * [Model overview](https://platform.openai.com/docs/models/overview) for - * descriptions of them. + * [Model overview](https://platform.openai.com/docs/models) for descriptions of + * them. */ model: (string & {}) | EmbeddingModel; @@ -115,14 +163,16 @@ /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
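The net effect of the base64 defaulting above, sketched from the caller's side (model and input as in the `@example`):

```ts
// No `encoding_format` given: the SDK requests base64 under the hood and
// decodes each embedding back to a numeric array before returning it.
const res = await client.embeddings.create({
  model: 'text-embedding-3-small',
  input: 'The quick brown fox jumped over the lazy dog',
});
console.log(res.data[0].embedding.length); // numeric values, not a base64 string

// Pass `encoding_format: 'base64'` explicitly to receive the raw base64 payload instead.
```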
*/ user?: string; } -export namespace Embeddings { - export import CreateEmbeddingResponse = EmbeddingsAPI.CreateEmbeddingResponse; - export import Embedding = EmbeddingsAPI.Embedding; - export import EmbeddingModel = EmbeddingsAPI.EmbeddingModel; - export import EmbeddingCreateParams = EmbeddingsAPI.EmbeddingCreateParams; +export declare namespace Embeddings { + export { + type CreateEmbeddingResponse as CreateEmbeddingResponse, + type Embedding as Embedding, + type EmbeddingModel as EmbeddingModel, + type EmbeddingCreateParams as EmbeddingCreateParams, + }; } diff --git a/src/resources/evals.ts b/src/resources/evals.ts new file mode 100644 index 000000000..b611710e1 --- /dev/null +++ b/src/resources/evals.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './evals/index'; diff --git a/src/resources/evals/evals.ts b/src/resources/evals/evals.ts new file mode 100644 index 000000000..05a656619 --- /dev/null +++ b/src/resources/evals/evals.ts @@ -0,0 +1,909 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as Shared from '../shared'; +import * as GraderModelsAPI from '../graders/grader-models'; +import * as ResponsesAPI from '../responses/responses'; +import * as RunsAPI from './runs/runs'; +import { + CreateEvalCompletionsRunDataSource, + CreateEvalJSONLRunDataSource, + EvalAPIError, + RunCancelResponse, + RunCreateParams, + RunCreateResponse, + RunDeleteResponse, + RunListParams, + RunListResponse, + RunListResponsesPage, + RunRetrieveResponse, + Runs, +} from './runs/runs'; +import { CursorPage, type CursorPageParams } from '../../pagination'; + +export class Evals extends APIResource { + runs: RunsAPI.Runs = new RunsAPI.Runs(this._client); + + /** + * Create the structure of an evaluation that can be used to test a model's + * performance. An evaluation is a set of testing criteria and the config for a + * data source, which dictates the schema of the data used in the evaluation. After + * creating an evaluation, you can run it on different models and model parameters. + * We support several types of graders and datasources. For more information, see + * the [Evals guide](https://platform.openai.com/docs/guides/evals). + */ + create(body: EvalCreateParams, options?: Core.RequestOptions): Core.APIPromise<EvalCreateResponse> { + return this._client.post('/evals', { body, ...options }); + } + + /** + * Get an evaluation by ID. + */ + retrieve(evalId: string, options?: Core.RequestOptions): Core.APIPromise<EvalRetrieveResponse> { + return this._client.get(`/evals/${evalId}`, options); + } + + /** + * Update certain properties of an evaluation. + */ + update( + evalId: string, + body: EvalUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise<EvalUpdateResponse> { + return this._client.post(`/evals/${evalId}`, { body, ...options }); + } + + /** + * List evaluations for a project. + */ + list( + query?: EvalListParams, + options?: Core.RequestOptions, + ): Core.PagePromise<EvalListResponsesPage, EvalListResponse>; + list(options?: Core.RequestOptions): Core.PagePromise<EvalListResponsesPage, EvalListResponse>; + list( + query: EvalListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise<EvalListResponsesPage, EvalListResponse> { + if (isRequestOptions(query)) { + return this.list({}, query); + } + return this._client.getAPIList('/evals', EvalListResponsesPage, { query, ...options }); + } + + /** + * Delete an evaluation.
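To make the data-source/grader split concrete, a sketch of creating an eval with a custom schema and a string-check grader (names and schema are illustrative):

```ts
const evaluation = await client.evals.create({
  name: 'Math QA', // illustrative
  data_source_config: {
    type: 'custom',
    item_schema: {
      type: 'object',
      properties: { question: { type: 'string' }, answer: { type: 'string' } },
      required: ['question', 'answer'],
    },
    include_sample_schema: true, // runs will populate `sample` from model output
  },
  testing_criteria: [
    {
      type: 'string_check',
      name: 'exact match',
      input: '{{sample.output_text}}',
      reference: '{{item.answer}}',
      operation: 'eq',
    },
  ],
});

console.log(evaluation.id);
```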
+ */ + del(evalId: string, options?: Core.RequestOptions): Core.APIPromise<EvalDeleteResponse> { + return this._client.delete(`/evals/${evalId}`, options); + } +} + +export class EvalListResponsesPage extends CursorPage<EvalListResponse> {} + +/** + * A CustomDataSourceConfig which specifies the schema of your `item` and + * optionally `sample` namespaces. The response schema defines the shape of the + * data that will be: + * + * - Used to define your testing criteria and + * - What data is required when creating a run + */ +export interface EvalCustomDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record<string, unknown>; + + /** + * The type of data source. Always `custom`. + */ + type: 'custom'; +} + +/** + * @deprecated Deprecated in favor of LogsDataSourceConfig. + */ +export interface EvalStoredCompletionsDataSourceConfig { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record<string, unknown>; + + /** + * The type of data source. Always `stored_completions`. + */ + type: 'stored_completions'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o4-mini is better at my usecase than gpt-4o + */ +export interface EvalCreateResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: + | EvalCustomDataSourceConfig + | EvalCreateResponse.Logs + | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * A list of testing criteria. + */ + testing_criteria: Array< + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalCreateResponse.EvalGraderTextSimilarity + | EvalCreateResponse.EvalGraderPython + | EvalCreateResponse.EvalGraderScoreModel + >; +} + +export namespace EvalCreateResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to define what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config.
+ */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record<string, unknown>; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + + /** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { + /** + * The threshold for the score. + */ + pass_threshold: number; + } + + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o4-mini is better at my usecase than gpt-4o + */ +export interface EvalRetrieveResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: + | EvalCustomDataSourceConfig + | EvalRetrieveResponse.Logs + | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * A list of testing criteria. + */ + testing_criteria: Array< + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalRetrieveResponse.EvalGraderTextSimilarity + | EvalRetrieveResponse.EvalGraderPython + | EvalRetrieveResponse.EvalGraderScoreModel + >; +} + +export namespace EvalRetrieveResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to define what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record<string, unknown>; + + /** + * The type of data source.
Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + + /** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { + /** + * The threshold for the score. + */ + pass_threshold: number; + } + + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o4-mini is better at my usecase than gpt-4o + */ +export interface EvalUpdateResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: + | EvalCustomDataSourceConfig + | EvalUpdateResponse.Logs + | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * A list of testing criteria. + */ + testing_criteria: Array< + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalUpdateResponse.EvalGraderTextSimilarity + | EvalUpdateResponse.EvalGraderPython + | EvalUpdateResponse.EvalGraderScoreModel + >; +} + +export namespace EvalUpdateResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to define what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record<string, unknown>; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object.
This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + + /** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { + /** + * The threshold for the score. + */ + pass_threshold: number; + } + + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } +} + +/** + * An Eval object with a data source config and testing criteria. An Eval + * represents a task to be done for your LLM integration. Like: + * + * - Improve the quality of my chatbot + * - See how well my chatbot handles customer support + * - Check if o4-mini is better at my usecase than gpt-4o + */ +export interface EvalListResponse { + /** + * Unique identifier for the evaluation. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the eval was created. + */ + created_at: number; + + /** + * Configuration of data sources used in runs of the evaluation. + */ + data_source_config: + | EvalCustomDataSourceConfig + | EvalListResponse.Logs + | EvalStoredCompletionsDataSourceConfig; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name: string; + + /** + * The object type. + */ + object: 'eval'; + + /** + * A list of testing criteria. + */ + testing_criteria: Array< + | GraderModelsAPI.LabelModelGrader + | GraderModelsAPI.StringCheckGrader + | EvalListResponse.EvalGraderTextSimilarity + | EvalListResponse.EvalGraderPython + | EvalListResponse.EvalGraderScoreModel + >; +} + +export namespace EvalListResponse { + /** + * A LogsDataSourceConfig which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The + * schema returned by this data source config is used to define what variables are + * available in your evals. `item` and `sample` are both defined when using this + * data source config. + */ + export interface Logs { + /** + * The json schema for the run data source items. Learn how to build JSON schemas + * [here](https://json-schema.org/). + */ + schema: Record<string, unknown>; + + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters.
Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + } + + /** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ + export interface EvalGraderTextSimilarity extends GraderModelsAPI.TextSimilarityGrader { + /** + * The threshold for the score. + */ + pass_threshold: number; + } + + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface EvalGraderPython extends GraderModelsAPI.PythonGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface EvalGraderScoreModel extends GraderModelsAPI.ScoreModelGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } +} + +export interface EvalDeleteResponse { + deleted: boolean; + + eval_id: string; + + object: string; +} + +export interface EvalCreateParams { + /** + * The configuration for the data source used for the evaluation runs. Dictates the + * schema of the data used in the evaluation. + */ + data_source_config: EvalCreateParams.Custom | EvalCreateParams.Logs | EvalCreateParams.StoredCompletions; + + /** + * A list of graders for all eval runs in this group. Graders can reference + * variables in the data source using double curly braces notation, like + * `{{item.variable_name}}`. To reference the model's output, use the `sample` + * namespace (ie, `{{sample.output_text}}`). + */ + testing_criteria: Array< + | EvalCreateParams.LabelModel + | GraderModelsAPI.StringCheckGrader + | EvalCreateParams.TextSimilarity + | EvalCreateParams.Python + | EvalCreateParams.ScoreModel + >; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The name of the evaluation. + */ + name?: string; +} + +export namespace EvalCreateParams { + /** + * A CustomDataSourceConfig object that defines the schema for the data source used + * for the evaluation runs. This schema is used to define the shape of the data + * that will be: + * + * - Used to define your testing criteria and + * - What data is required when creating a run + */ + export interface Custom { + /** + * The json schema for each row in the data source. + */ + item_schema: Record<string, unknown>; + + /** + * The type of data source. Always `custom`. + */ + type: 'custom'; + + /** + * Whether the eval should expect you to populate the sample namespace (ie, by + * generating responses off of your data source) + */ + include_sample_schema?: boolean; + } + + /** + * A data source config which specifies the metadata property of your logs query. + * This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + */ + export interface Logs { + /** + * The type of data source. Always `logs`. + */ + type: 'logs'; + + /** + * Metadata filters for the logs data source. + */ + metadata?: Record<string, unknown>; + } + + /** + * @deprecated Deprecated in favor of LogsDataSourceConfig. + */ + export interface StoredCompletions { + /** + * The type of data source. Always `stored_completions`. + */ + type: 'stored_completions'; + + /** + * Metadata filters for the stored completions data source. + */ + metadata?: Record<string, unknown>; + } + + /** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. + */ + export interface LabelModel { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the `item` namespace, ie {{item.name}}. + */ + input: Array<LabelModel.SimpleInputMessage | LabelModel.EvalItem>; + + /** + * The labels to classify to each item in the evaluation. + */ + labels: Array<string>; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. + */ + passing_labels: Array<string>; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; + } + + export namespace LabelModel { + export interface SimpleInputMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + /** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ + export interface TextSimilarity extends GraderModelsAPI.TextSimilarityGrader { + /** + * The threshold for the score. + */ + pass_threshold: number; + } + + /** + * A PythonGrader object that runs a python script on the input. + */ + export interface Python extends GraderModelsAPI.PythonGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } + + /** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ + export interface ScoreModel extends GraderModelsAPI.ScoreModelGrader { + /** + * The threshold for the score. + */ + pass_threshold?: number; + } +} + +export interface EvalUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * Rename the evaluation. + */ + name?: string; +} + +export interface EvalListParams extends CursorPageParams { + /** + * Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for + * descending order.
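`EvalUpdateParams` in use, for completeness (eval ID and values are illustrative):

```ts
const updated = await client.evals.update('eval_abc123', {
  name: 'Math QA v2',
  metadata: { owner: 'ml-team' }, // up to 16 key-value pairs, per the docs above
});
console.log(updated.name);
```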
+ */ + order?: 'asc' | 'desc'; + + /** + * Evals can be ordered by creation time or last updated time. Use `created_at` for + * creation time or `updated_at` for last updated time. + */ + order_by?: 'created_at' | 'updated_at'; +} + +Evals.EvalListResponsesPage = EvalListResponsesPage; +Evals.Runs = Runs; +Evals.RunListResponsesPage = RunListResponsesPage; + +export declare namespace Evals { + export { + type EvalCustomDataSourceConfig as EvalCustomDataSourceConfig, + type EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, + type EvalCreateResponse as EvalCreateResponse, + type EvalRetrieveResponse as EvalRetrieveResponse, + type EvalUpdateResponse as EvalUpdateResponse, + type EvalListResponse as EvalListResponse, + type EvalDeleteResponse as EvalDeleteResponse, + EvalListResponsesPage as EvalListResponsesPage, + type EvalCreateParams as EvalCreateParams, + type EvalUpdateParams as EvalUpdateParams, + type EvalListParams as EvalListParams, + }; + + export { + Runs as Runs, + type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type EvalAPIError as EvalAPIError, + type RunCreateResponse as RunCreateResponse, + type RunRetrieveResponse as RunRetrieveResponse, + type RunListResponse as RunListResponse, + type RunDeleteResponse as RunDeleteResponse, + type RunCancelResponse as RunCancelResponse, + RunListResponsesPage as RunListResponsesPage, + type RunCreateParams as RunCreateParams, + type RunListParams as RunListParams, + }; +} diff --git a/src/resources/evals/index.ts b/src/resources/evals/index.ts new file mode 100644 index 000000000..b2627fbf3 --- /dev/null +++ b/src/resources/evals/index.ts @@ -0,0 +1,30 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + EvalListResponsesPage, + Evals, + type EvalCustomDataSourceConfig, + type EvalStoredCompletionsDataSourceConfig, + type EvalCreateResponse, + type EvalRetrieveResponse, + type EvalUpdateResponse, + type EvalListResponse, + type EvalDeleteResponse, + type EvalCreateParams, + type EvalUpdateParams, + type EvalListParams, +} from './evals'; +export { + RunListResponsesPage, + Runs, + type CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource, + type EvalAPIError, + type RunCreateResponse, + type RunRetrieveResponse, + type RunListResponse, + type RunDeleteResponse, + type RunCancelResponse, + type RunCreateParams, + type RunListParams, +} from './runs/index'; diff --git a/src/resources/evals/runs.ts b/src/resources/evals/runs.ts new file mode 100644 index 000000000..a3cc2bc7f --- /dev/null +++ b/src/resources/evals/runs.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './runs/index'; diff --git a/src/resources/evals/runs/index.ts b/src/resources/evals/runs/index.ts new file mode 100644 index 000000000..d0e18bff4 --- /dev/null +++ b/src/resources/evals/runs/index.ts @@ -0,0 +1,23 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
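+//
+// A minimal end-to-end sketch of the runs API re-exported below (not part of the
+// generated code; `client` is assumed to be an initialized OpenAI instance and
+// all IDs are illustrative):
+//
+//   const run = await client.evals.runs.create('eval_abc123', {
+//     name: 'nightly-regression',
+//     data_source: { type: 'jsonl', source: { type: 'file_id', id: 'file_123' } },
+//   });
+//   const items = await client.evals.runs.outputItems.list('eval_abc123', run.id);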
+
+export {
+  OutputItemListResponsesPage,
+  OutputItems,
+  type OutputItemRetrieveResponse,
+  type OutputItemListResponse,
+  type OutputItemListParams,
+} from './output-items';
+export {
+  RunListResponsesPage,
+  Runs,
+  type CreateEvalCompletionsRunDataSource,
+  type CreateEvalJSONLRunDataSource,
+  type EvalAPIError,
+  type RunCreateResponse,
+  type RunRetrieveResponse,
+  type RunListResponse,
+  type RunDeleteResponse,
+  type RunCancelResponse,
+  type RunCreateParams,
+  type RunListParams,
+} from './runs';
diff --git a/src/resources/evals/runs/output-items.ts b/src/resources/evals/runs/output-items.ts
new file mode 100644
index 000000000..ee947c60f
--- /dev/null
+++ b/src/resources/evals/runs/output-items.ts
@@ -0,0 +1,410 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+import { isRequestOptions } from '../../../core';
+import * as Core from '../../../core';
+import * as RunsAPI from './runs';
+import { CursorPage, type CursorPageParams } from '../../../pagination';
+
+export class OutputItems extends APIResource {
+  /**
+   * Get an evaluation run output item by ID.
+   */
+  retrieve(
+    evalId: string,
+    runId: string,
+    outputItemId: string,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<OutputItemRetrieveResponse> {
+    return this._client.get(`/evals/${evalId}/runs/${runId}/output_items/${outputItemId}`, options);
+  }
+
+  /**
+   * Get a list of output items for an evaluation run.
+   */
+  list(
+    evalId: string,
+    runId: string,
+    query?: OutputItemListParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<OutputItemListResponsesPage, OutputItemListResponse>;
+  list(
+    evalId: string,
+    runId: string,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<OutputItemListResponsesPage, OutputItemListResponse>;
+  list(
+    evalId: string,
+    runId: string,
+    query: OutputItemListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<OutputItemListResponsesPage, OutputItemListResponse> {
+    if (isRequestOptions(query)) {
+      return this.list(evalId, runId, {}, query);
+    }
+    return this._client.getAPIList(
+      `/evals/${evalId}/runs/${runId}/output_items`,
+      OutputItemListResponsesPage,
+      { query, ...options },
+    );
+  }
+}
+
+export class OutputItemListResponsesPage extends CursorPage<OutputItemListResponse> {}
+
+/**
+ * A schema representing an evaluation run output item.
+ */
+export interface OutputItemRetrieveResponse {
+  /**
+   * Unique identifier for the evaluation run output item.
+   */
+  id: string;
+
+  /**
+   * Unix timestamp (in seconds) when the evaluation run was created.
+   */
+  created_at: number;
+
+  /**
+   * Details of the input data source item.
+   */
+  datasource_item: Record<string, unknown>;
+
+  /**
+   * The identifier for the data source item.
+   */
+  datasource_item_id: number;
+
+  /**
+   * The identifier of the evaluation group.
+   */
+  eval_id: string;
+
+  /**
+   * The type of the object. Always "eval.run.output_item".
+   */
+  object: 'eval.run.output_item';
+
+  /**
+   * A list of results from the evaluation run.
+   */
+  results: Array<Record<string, unknown>>;
+
+  /**
+   * The identifier of the evaluation run associated with this output item.
+   */
+  run_id: string;
+
+  /**
+   * A sample containing the input and output of the evaluation run.
+   */
+  sample: OutputItemRetrieveResponse.Sample;
+
+  /**
+   * The status of the evaluation run.
+   */
+  status: string;
+}
+
+export namespace OutputItemRetrieveResponse {
+  /**
+   * A sample containing the input and output of the evaluation run.
+   */
+  export interface Sample {
+    /**
+     * An object representing an error response from the Eval API.
+     */
+    error: RunsAPI.EvalAPIError;
+
+    /**
+     * The reason why the sample generation was finished.
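+     * For example, `stop` when the model finished naturally, or `length` when the
+     * token limit was reached (illustrative values; they mirror chat completion
+     * finish reasons).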
+     */
+    finish_reason: string;
+
+    /**
+     * An array of input messages.
+     */
+    input: Array<Sample.Input>;
+
+    /**
+     * The maximum number of tokens allowed for completion.
+     */
+    max_completion_tokens: number;
+
+    /**
+     * The model used for generating the sample.
+     */
+    model: string;
+
+    /**
+     * An array of output messages.
+     */
+    output: Array<Sample.Output>;
+
+    /**
+     * The seed used for generating the sample.
+     */
+    seed: number;
+
+    /**
+     * The sampling temperature used.
+     */
+    temperature: number;
+
+    /**
+     * The top_p value used for sampling.
+     */
+    top_p: number;
+
+    /**
+     * Token usage details for the sample.
+     */
+    usage: Sample.Usage;
+  }
+
+  export namespace Sample {
+    /**
+     * An input message.
+     */
+    export interface Input {
+      /**
+       * The content of the message.
+       */
+      content: string;
+
+      /**
+       * The role of the message sender (e.g., system, user, developer).
+       */
+      role: string;
+    }
+
+    export interface Output {
+      /**
+       * The content of the message.
+       */
+      content?: string;
+
+      /**
+       * The role of the message (e.g. "system", "assistant", "user").
+       */
+      role?: string;
+    }
+
+    /**
+     * Token usage details for the sample.
+     */
+    export interface Usage {
+      /**
+       * The number of tokens retrieved from cache.
+       */
+      cached_tokens: number;
+
+      /**
+       * The number of completion tokens generated.
+       */
+      completion_tokens: number;
+
+      /**
+       * The number of prompt tokens used.
+       */
+      prompt_tokens: number;
+
+      /**
+       * The total number of tokens used.
+       */
+      total_tokens: number;
+    }
+  }
+}
+
+/**
+ * A schema representing an evaluation run output item.
+ */
+export interface OutputItemListResponse {
+  /**
+   * Unique identifier for the evaluation run output item.
+   */
+  id: string;
+
+  /**
+   * Unix timestamp (in seconds) when the evaluation run was created.
+   */
+  created_at: number;
+
+  /**
+   * Details of the input data source item.
+   */
+  datasource_item: Record<string, unknown>;
+
+  /**
+   * The identifier for the data source item.
+   */
+  datasource_item_id: number;
+
+  /**
+   * The identifier of the evaluation group.
+   */
+  eval_id: string;
+
+  /**
+   * The type of the object. Always "eval.run.output_item".
+   */
+  object: 'eval.run.output_item';
+
+  /**
+   * A list of results from the evaluation run.
+   */
+  results: Array<Record<string, unknown>>;
+
+  /**
+   * The identifier of the evaluation run associated with this output item.
+   */
+  run_id: string;
+
+  /**
+   * A sample containing the input and output of the evaluation run.
+   */
+  sample: OutputItemListResponse.Sample;
+
+  /**
+   * The status of the evaluation run.
+   */
+  status: string;
+}
+
+export namespace OutputItemListResponse {
+  /**
+   * A sample containing the input and output of the evaluation run.
+   */
+  export interface Sample {
+    /**
+     * An object representing an error response from the Eval API.
+     */
+    error: RunsAPI.EvalAPIError;
+
+    /**
+     * The reason why the sample generation was finished.
+     */
+    finish_reason: string;
+
+    /**
+     * An array of input messages.
+     */
+    input: Array<Sample.Input>;
+
+    /**
+     * The maximum number of tokens allowed for completion.
+     */
+    max_completion_tokens: number;
+
+    /**
+     * The model used for generating the sample.
+     */
+    model: string;
+
+    /**
+     * An array of output messages.
+     */
+    output: Array<Sample.Output>;
+
+    /**
+     * The seed used for generating the sample.
+     */
+    seed: number;
+
+    /**
+     * The sampling temperature used.
+     */
+    temperature: number;
+
+    /**
+     * The top_p value used for sampling.
+     */
+    top_p: number;
+
+    /**
+     * Token usage details for the sample.
+     */
+    usage: Sample.Usage;
+  }
+
+  export namespace Sample {
+    /**
+     * An input message.
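+     * (for example, the system prompt or a user turn that was sent to the model).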
+     */
+    export interface Input {
+      /**
+       * The content of the message.
+       */
+      content: string;
+
+      /**
+       * The role of the message sender (e.g., system, user, developer).
+       */
+      role: string;
+    }
+
+    export interface Output {
+      /**
+       * The content of the message.
+       */
+      content?: string;
+
+      /**
+       * The role of the message (e.g. "system", "assistant", "user").
+       */
+      role?: string;
+    }
+
+    /**
+     * Token usage details for the sample.
+     */
+    export interface Usage {
+      /**
+       * The number of tokens retrieved from cache.
+       */
+      cached_tokens: number;
+
+      /**
+       * The number of completion tokens generated.
+       */
+      completion_tokens: number;
+
+      /**
+       * The number of prompt tokens used.
+       */
+      prompt_tokens: number;
+
+      /**
+       * The total number of tokens used.
+       */
+      total_tokens: number;
+    }
+  }
+}
+
+export interface OutputItemListParams extends CursorPageParams {
+  /**
+   * Sort order for output items by timestamp. Use `asc` for ascending order or
+   * `desc` for descending order. Defaults to `asc`.
+   */
+  order?: 'asc' | 'desc';
+
+  /**
+   * Filter output items by status. Use `fail` to filter by failed output items or
+   * `pass` to filter by passed output items.
+   */
+  status?: 'fail' | 'pass';
+}
+
+OutputItems.OutputItemListResponsesPage = OutputItemListResponsesPage;
+
+export declare namespace OutputItems {
+  export {
+    type OutputItemRetrieveResponse as OutputItemRetrieveResponse,
+    type OutputItemListResponse as OutputItemListResponse,
+    OutputItemListResponsesPage as OutputItemListResponsesPage,
+    type OutputItemListParams as OutputItemListParams,
+  };
+}
diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts
new file mode 100644
index 000000000..e761e2160
--- /dev/null
+++ b/src/resources/evals/runs/runs.ts
@@ -0,0 +1,2228 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+import { isRequestOptions } from '../../../core';
+import * as Core from '../../../core';
+import * as Shared from '../../shared';
+import * as ResponsesAPI from '../../responses/responses';
+import * as OutputItemsAPI from './output-items';
+import {
+  OutputItemListParams,
+  OutputItemListResponse,
+  OutputItemListResponsesPage,
+  OutputItemRetrieveResponse,
+  OutputItems,
+} from './output-items';
+import { CursorPage, type CursorPageParams } from '../../../pagination';
+
+export class Runs extends APIResource {
+  outputItems: OutputItemsAPI.OutputItems = new OutputItemsAPI.OutputItems(this._client);
+
+  /**
+   * Kicks off a new run for a given evaluation, specifying the data source and what
+   * model configuration to use for testing. The data source will be validated
+   * against the schema specified in the config of the evaluation.
+   */
+  create(
+    evalId: string,
+    body: RunCreateParams,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<RunCreateResponse> {
+    return this._client.post(`/evals/${evalId}/runs`, { body, ...options });
+  }
+
+  /**
+   * Get an evaluation run by ID.
+   */
+  retrieve(
+    evalId: string,
+    runId: string,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<RunRetrieveResponse> {
+    return this._client.get(`/evals/${evalId}/runs/${runId}`, options);
+  }
+
+  /**
+   * Get a list of runs for an evaluation.
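+   *
+   * @example
+   * A minimal sketch (assuming `client` is an initialized OpenAI instance and the
+   * eval ID is illustrative) that pages through every run:
+   *
+   * ```ts
+   * for await (const run of client.evals.runs.list('eval_abc123', { order: 'desc' })) {
+   *   console.log(run.id, run.status);
+   * }
+   * ```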
+   */
+  list(
+    evalId: string,
+    query?: RunListParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<RunListResponsesPage, RunListResponse>;
+  list(
+    evalId: string,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<RunListResponsesPage, RunListResponse>;
+  list(
+    evalId: string,
+    query: RunListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<RunListResponsesPage, RunListResponse> {
+    if (isRequestOptions(query)) {
+      return this.list(evalId, {}, query);
+    }
+    return this._client.getAPIList(`/evals/${evalId}/runs`, RunListResponsesPage, { query, ...options });
+  }
+
+  /**
+   * Delete an eval run.
+   */
+  del(evalId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<RunDeleteResponse> {
+    return this._client.delete(`/evals/${evalId}/runs/${runId}`, options);
+  }
+
+  /**
+   * Cancel an ongoing evaluation run.
+   */
+  cancel(evalId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<RunCancelResponse> {
+    return this._client.post(`/evals/${evalId}/runs/${runId}`, options);
+  }
+}
+
+export class RunListResponsesPage extends CursorPage<RunListResponse> {}
+
+/**
+ * A CompletionsRunDataSource object describing a model sampling configuration.
+ */
+export interface CreateEvalCompletionsRunDataSource {
+  /**
+   * Determines what populates the `item` namespace in this run's data source.
+   */
+  source:
+    | CreateEvalCompletionsRunDataSource.FileContent
+    | CreateEvalCompletionsRunDataSource.FileID
+    | CreateEvalCompletionsRunDataSource.StoredCompletions;
+
+  /**
+   * The type of run data source. Always `completions`.
+   */
+  type: 'completions';
+
+  /**
+   * Used when sampling from a model. Dictates the structure of the messages passed
+   * into the model. Can either be a reference to a prebuilt trajectory (ie,
+   * `item.input_trajectory`), or a template with variable references to the `item`
+   * namespace.
+   */
+  input_messages?:
+    | CreateEvalCompletionsRunDataSource.Template
+    | CreateEvalCompletionsRunDataSource.ItemReference;
+
+  /**
+   * The name of the model to use for generating completions (e.g. "o3-mini").
+   */
+  model?: string;
+
+  sampling_params?: CreateEvalCompletionsRunDataSource.SamplingParams;
+}
+
+export namespace CreateEvalCompletionsRunDataSource {
+  export interface FileContent {
+    /**
+     * The content of the jsonl file.
+     */
+    content: Array<FileContent.Content>;
+
+    /**
+     * The type of jsonl source. Always `file_content`.
+     */
+    type: 'file_content';
+  }
+
+  export namespace FileContent {
+    export interface Content {
+      item: Record<string, unknown>;
+
+      sample?: Record<string, unknown>;
+    }
+  }
+
+  export interface FileID {
+    /**
+     * The identifier of the file.
+     */
+    id: string;
+
+    /**
+     * The type of jsonl source. Always `file_id`.
+     */
+    type: 'file_id';
+  }
+
+  /**
+   * A StoredCompletionsRunDataSource configuration describing a set of filters
+   */
+  export interface StoredCompletions {
+    /**
+     * The type of source. Always `stored_completions`.
+     */
+    type: 'stored_completions';
+
+    /**
+     * An optional Unix timestamp to filter items created after this time.
+     */
+    created_after?: number | null;
+
+    /**
+     * An optional Unix timestamp to filter items created before this time.
+     */
+    created_before?: number | null;
+
+    /**
+     * An optional maximum number of items to return.
+     */
+    limit?: number | null;
+
+    /**
+     * Set of 16 key-value pairs that can be attached to an object. This can be useful
+     * for storing additional information about the object in a structured format, and
+     * querying for objects via API or the dashboard.
+     *
+     * Keys are strings with a maximum length of 64 characters. Values are strings with
+     * a maximum length of 512 characters.
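+     * For example (an illustrative filter): `{ 'prompt-version': 'v2' }` restricts
+     * the run to stored completions tagged with that metadata.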
+     */
+    metadata?: Shared.Metadata | null;
+
+    /**
+     * An optional model to filter by (e.g., 'gpt-4o').
+     */
+    model?: string | null;
+  }
+
+  export interface Template {
+    /**
+     * A list of chat messages forming the prompt or context. May include variable
+     * references to the `item` namespace, ie {{item.name}}.
+     */
+    template: Array<Template.Message>;
+
+    /**
+     * The type of input messages. Always `template`.
+     */
+    type: 'template';
+  }
+
+  export namespace Template {
+    /**
+     * A message input to the model with a role indicating instruction following
+     * hierarchy. Instructions given with the `developer` or `system` role take
+     * precedence over instructions given with the `user` role. Messages with the
+     * `assistant` role are presumed to have been generated by the model in previous
+     * interactions.
+     */
+    export interface Message {
+      /**
+       * Text inputs to the model - can contain template strings.
+       */
+      content: string | ResponsesAPI.ResponseInputText | Message.OutputText;
+
+      /**
+       * The role of the message input. One of `user`, `assistant`, `system`, or
+       * `developer`.
+       */
+      role: 'user' | 'assistant' | 'system' | 'developer';
+
+      /**
+       * The type of the message input. Always `message`.
+       */
+      type?: 'message';
+    }
+
+    export namespace Message {
+      /**
+       * A text output from the model.
+       */
+      export interface OutputText {
+        /**
+         * The text output from the model.
+         */
+        text: string;
+
+        /**
+         * The type of the output text. Always `output_text`.
+         */
+        type: 'output_text';
+      }
+    }
+  }
+
+  export interface ItemReference {
+    /**
+     * A reference to a variable in the `item` namespace. Ie, "item.input_trajectory"
+     */
+    item_reference: string;
+
+    /**
+     * The type of input messages. Always `item_reference`.
+     */
+    type: 'item_reference';
+  }
+
+  export interface SamplingParams {
+    /**
+     * The maximum number of tokens in the generated output.
+     */
+    max_completion_tokens?: number;
+
+    /**
+     * A seed value to initialize the randomness, during sampling.
+     */
+    seed?: number;
+
+    /**
+     * A higher temperature increases randomness in the outputs.
+     */
+    temperature?: number;
+
+    /**
+     * An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+     */
+    top_p?: number;
+  }
+}
+
+/**
+ * A JsonlRunDataSource object that specifies a JSONL file matching the eval's
+ * schema.
+ */
+export interface CreateEvalJSONLRunDataSource {
+  /**
+   * Determines what populates the `item` namespace in the data source.
+   */
+  source: CreateEvalJSONLRunDataSource.FileContent | CreateEvalJSONLRunDataSource.FileID;
+
+  /**
+   * The type of data source. Always `jsonl`.
+   */
+  type: 'jsonl';
+}
+
+export namespace CreateEvalJSONLRunDataSource {
+  export interface FileContent {
+    /**
+     * The content of the jsonl file.
+     */
+    content: Array<FileContent.Content>;
+
+    /**
+     * The type of jsonl source. Always `file_content`.
+     */
+    type: 'file_content';
+  }
+
+  export namespace FileContent {
+    export interface Content {
+      item: Record<string, unknown>;
+
+      sample?: Record<string, unknown>;
+    }
+  }
+
+  export interface FileID {
+    /**
+     * The identifier of the file.
+     */
+    id: string;
+
+    /**
+     * The type of jsonl source. Always `file_id`.
+     */
+    type: 'file_id';
+  }
+}
+
+/**
+ * An object representing an error response from the Eval API.
+ */
+export interface EvalAPIError {
+  /**
+   * The error code.
+   */
+  code: string;
+
+  /**
+   * The error message.
+   */
+  message: string;
+}
+
+/**
+ * A schema representing an evaluation run.
+ */
+export interface RunCreateResponse {
+  /**
+   * Unique identifier for the evaluation run.
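+   * (for example, `evalrun_abc123`; the exact prefix is illustrative).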
+ */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCreateResponse.Responses; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCreateResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCreateResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * Determines what populates the `item` namespace in this run's data source. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. 
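+       * For example, `created_before: 1718000000` (an illustrative Unix timestamp)
+       * selects responses created at or before that moment.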
+ */ + created_before?: number | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the `item` namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the `item` namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. 
+ */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +/** + * A schema representing an evaluation run. + */ +export interface RunRetrieveResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunRetrieveResponse.Responses; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunRetrieveResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunRetrieveResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * Determines what populates the `item` namespace in this run's data source. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. 
Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the `item` namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. 
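+         * For example, `"Classify this ticket: {{item.ticket_text}}"` (the field
+         * name is illustrative).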
+ */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the `item` namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +/** + * A schema representing an evaluation run. + */ +export interface RunListResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: CreateEvalJSONLRunDataSource | CreateEvalCompletionsRunDataSource | RunListResponse.Responses; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. 
+ */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunListResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunListResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * Determines what populates the `item` namespace in this run's data source. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. 
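+       * For example (illustrative values): `['file_search', 'code_interpreter']`.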
+ */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the `item` namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the `item` namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. 
+ */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunDeleteResponse { + deleted?: boolean; + + object?: string; + + run_id?: string; +} + +/** + * A schema representing an evaluation run. + */ +export interface RunCancelResponse { + /** + * Unique identifier for the evaluation run. + */ + id: string; + + /** + * Unix timestamp (in seconds) when the evaluation run was created. + */ + created_at: number; + + /** + * Information about the run's data source. + */ + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCancelResponse.Responses; + + /** + * An object representing an error response from the Eval API. + */ + error: EvalAPIError; + + /** + * The identifier of the associated evaluation. + */ + eval_id: string; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * The model that is evaluated, if applicable. + */ + model: string; + + /** + * The name of the evaluation run. + */ + name: string; + + /** + * The type of the object. Always "eval.run". + */ + object: 'eval.run'; + + /** + * Usage statistics for each model during the evaluation run. + */ + per_model_usage: Array; + + /** + * Results per testing criteria applied during the evaluation run. + */ + per_testing_criteria_results: Array; + + /** + * The URL to the rendered evaluation run report on the UI dashboard. + */ + report_url: string; + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + result_counts: RunCancelResponse.ResultCounts; + + /** + * The status of the evaluation run. + */ + status: string; +} + +export namespace RunCancelResponse { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface Responses { + /** + * Determines what populates the `item` namespace in this run's data source. + */ + source: Responses.FileContent | Responses.FileID | Responses.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ + input_messages?: Responses.Template | Responses.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). + */ + model?: string; + + sampling_params?: Responses.SamplingParams; + } + + export namespace Responses { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. 
+ */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the `item` namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the `item` namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. 
+ */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } + + export interface PerModelUsage { + /** + * The number of tokens retrieved from cache. + */ + cached_tokens: number; + + /** + * The number of completion tokens generated. + */ + completion_tokens: number; + + /** + * The number of invocations. + */ + invocation_count: number; + + /** + * The name of the model. + */ + model_name: string; + + /** + * The number of prompt tokens used. + */ + prompt_tokens: number; + + /** + * The total number of tokens used. + */ + total_tokens: number; + } + + export interface PerTestingCriteriaResult { + /** + * Number of tests failed for this criteria. + */ + failed: number; + + /** + * Number of tests passed for this criteria. + */ + passed: number; + + /** + * A description of the testing criteria. + */ + testing_criteria: string; + } + + /** + * Counters summarizing the outcomes of the evaluation run. + */ + export interface ResultCounts { + /** + * Number of output items that resulted in an error. + */ + errored: number; + + /** + * Number of output items that failed to pass the evaluation. + */ + failed: number; + + /** + * Number of output items that passed the evaluation. + */ + passed: number; + + /** + * Total number of executed output items. + */ + total: number; + } +} + +export interface RunCreateParams { + /** + * Details about the run's data source. + */ + data_source: + | CreateEvalJSONLRunDataSource + | CreateEvalCompletionsRunDataSource + | RunCreateParams.CreateEvalResponsesRunDataSource; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata?: Shared.Metadata | null; + + /** + * The name of the run. + */ + name?: string; +} + +export namespace RunCreateParams { + /** + * A ResponsesRunDataSource object describing a model sampling configuration. + */ + export interface CreateEvalResponsesRunDataSource { + /** + * Determines what populates the `item` namespace in this run's data source. + */ + source: + | CreateEvalResponsesRunDataSource.FileContent + | CreateEvalResponsesRunDataSource.FileID + | CreateEvalResponsesRunDataSource.Responses; + + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Used when sampling from a model. Dictates the structure of the messages passed + * into the model. Can either be a reference to a prebuilt trajectory (ie, + * `item.input_trajectory`), or a template with variable references to the `item` + * namespace. + */ + input_messages?: + | CreateEvalResponsesRunDataSource.Template + | CreateEvalResponsesRunDataSource.ItemReference; + + /** + * The name of the model to use for generating completions (e.g. "o3-mini"). 
+ */ + model?: string; + + sampling_params?: CreateEvalResponsesRunDataSource.SamplingParams; + } + + export namespace CreateEvalResponsesRunDataSource { + export interface FileContent { + /** + * The content of the jsonl file. + */ + content: Array; + + /** + * The type of jsonl source. Always `file_content`. + */ + type: 'file_content'; + } + + export namespace FileContent { + export interface Content { + item: Record; + + sample?: Record; + } + } + + export interface FileID { + /** + * The identifier of the file. + */ + id: string; + + /** + * The type of jsonl source. Always `file_id`. + */ + type: 'file_id'; + } + + /** + * A EvalResponsesSource object describing a run data source configuration. + */ + export interface Responses { + /** + * The type of run data source. Always `responses`. + */ + type: 'responses'; + + /** + * Only include items created after this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_after?: number | null; + + /** + * Only include items created before this timestamp (inclusive). This is a query + * parameter used to select responses. + */ + created_before?: number | null; + + /** + * Optional string to search the 'instructions' field. This is a query parameter + * used to select responses. + */ + instructions_search?: string | null; + + /** + * Metadata filter for the responses. This is a query parameter used to select + * responses. + */ + metadata?: unknown | null; + + /** + * The name of the model to find responses for. This is a query parameter used to + * select responses. + */ + model?: string | null; + + /** + * Optional reasoning effort parameter. This is a query parameter used to select + * responses. + */ + reasoning_effort?: Shared.ReasoningEffort | null; + + /** + * Sampling temperature. This is a query parameter used to select responses. + */ + temperature?: number | null; + + /** + * List of tool names. This is a query parameter used to select responses. + */ + tools?: Array | null; + + /** + * Nucleus sampling parameter. This is a query parameter used to select responses. + */ + top_p?: number | null; + + /** + * List of user identifiers. This is a query parameter used to select responses. + */ + users?: Array | null; + } + + export interface Template { + /** + * A list of chat messages forming the prompt or context. May include variable + * references to the `item` namespace, ie {{item.name}}. + */ + template: Array; + + /** + * The type of input messages. Always `template`. + */ + type: 'template'; + } + + export namespace Template { + export interface ChatMessage { + /** + * The content of the message. + */ + content: string; + + /** + * The role of the message (e.g. "system", "assistant", "user"). + */ + role: string; + } + + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface EvalItem { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | EvalItem.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. 
+ */ + type?: 'message'; + } + + export namespace EvalItem { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } + } + + export interface ItemReference { + /** + * A reference to a variable in the `item` namespace. Ie, "item.name" + */ + item_reference: string; + + /** + * The type of input messages. Always `item_reference`. + */ + type: 'item_reference'; + } + + export interface SamplingParams { + /** + * The maximum number of tokens in the generated output. + */ + max_completion_tokens?: number; + + /** + * A seed value to initialize the randomness, during sampling. + */ + seed?: number; + + /** + * A higher temperature increases randomness in the outputs. + */ + temperature?: number; + + /** + * An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + */ + top_p?: number; + } + } +} + +export interface RunListParams extends CursorPageParams { + /** + * Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for + * descending order. Defaults to `asc`. + */ + order?: 'asc' | 'desc'; + + /** + * Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed` + * | `canceled`. + */ + status?: 'queued' | 'in_progress' | 'completed' | 'canceled' | 'failed'; +} + +Runs.RunListResponsesPage = RunListResponsesPage; +Runs.OutputItems = OutputItems; +Runs.OutputItemListResponsesPage = OutputItemListResponsesPage; + +export declare namespace Runs { + export { + type CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, + type CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource, + type EvalAPIError as EvalAPIError, + type RunCreateResponse as RunCreateResponse, + type RunRetrieveResponse as RunRetrieveResponse, + type RunListResponse as RunListResponse, + type RunDeleteResponse as RunDeleteResponse, + type RunCancelResponse as RunCancelResponse, + RunListResponsesPage as RunListResponsesPage, + type RunCreateParams as RunCreateParams, + type RunListParams as RunListParams, + }; + + export { + OutputItems as OutputItems, + type OutputItemRetrieveResponse as OutputItemRetrieveResponse, + type OutputItemListResponse as OutputItemListResponse, + OutputItemListResponsesPage as OutputItemListResponsesPage, + type OutputItemListParams as OutputItemListParams, + }; +} diff --git a/src/resources/files.ts b/src/resources/files.ts index ba01a9041..723ac4cde 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -5,8 +5,7 @@ import { isRequestOptions } from '../core'; import { sleep } from '../core'; import { APIConnectionTimeoutError } from '../error'; import * as Core from '../core'; -import * as FilesAPI from './files'; -import { Page } from '../pagination'; +import { CursorPage, type CursorPageParams } from '../pagination'; import { type Response } from '../_shims/index'; export class Files extends APIResource { @@ -26,7 +25,7 @@ export class Files extends APIResource { * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) * models. * - * The Batch API only supports `.jsonl` files up to 100 MB in size. The input also + * The Batch API only supports `.jsonl` files up to 200 MB in size. The input also * has a specific required * [format](https://platform.openai.com/docs/api-reference/batch/request-input). 
* @@ -45,7 +44,7 @@ export class Files extends APIResource { } /** - * Returns a list of files that belong to the user's organization. + * Returns a list of files. */ list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject>; list(options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject>; @@ -70,7 +69,11 @@ export class Files extends APIResource { * Returns the contents of the specified file. */ content(fileId: string, options?: Core.RequestOptions): Core.APIPromise<Response> { - return this._client.get(`/files/${fileId}/content`, { ...options, __binaryResponse: true }); + return this._client.get(`/files/${fileId}/content`, { + ...options, + headers: { Accept: 'application/binary', ...options?.headers }, + __binaryResponse: true, + }); } /** @@ -79,10 +82,7 @@ * @deprecated The `.content()` method should be used instead */ retrieveContent(fileId: string, options?: Core.RequestOptions): Core.APIPromise<string> { - return this._client.get(`/files/${fileId}/content`, { - ...options, - headers: { Accept: 'application/json', ...options?.headers }, - }); + return this._client.get(`/files/${fileId}/content`, options); } /** @@ -112,10 +112,7 @@ } } -/** - * Note: no pagination actually occurs yet, this is for forwards-compatibility. - */ -export class FileObjectsPage extends Page<FileObject> {} +export class FileObjectsPage extends CursorPage<FileObject> {} export type FileContent = string; @@ -171,29 +168,30 @@ | 'vision'; /** - * @deprecated: Deprecated. The current status of the file, which can be either + * @deprecated Deprecated. The current status of the file, which can be either * `uploaded`, `processed`, or `error`. */ status: 'uploaded' | 'processed' | 'error'; /** - * @deprecated: Deprecated. For details on why a fine-tuning training file failed + * The Unix timestamp (in seconds) for when the file will expire. + */ + expires_at?: number; + + /** + * @deprecated Deprecated. For details on why a fine-tuning training file failed + * validation, see the `error` field on `fine_tuning.job`. */ status_details?: string; } /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ -export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision'; +export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision' | 'user_data' | 'evals'; export interface FileCreateParams { /** @@ -202,31 +200,37 @@ file: Core.Uploadable; /** - * The intended purpose of the uploaded file.
- * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ purpose: FilePurpose; } -export interface FileListParams { +export interface FileListParams extends CursorPageParams { + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending + * order and `desc` for descending order. + */ + order?: 'asc' | 'desc'; + /** * Only return files with the given purpose. */ purpose?: string; } -export namespace Files { - export import FileContent = FilesAPI.FileContent; - export import FileDeleted = FilesAPI.FileDeleted; - export import FileObject = FilesAPI.FileObject; - export import FilePurpose = FilesAPI.FilePurpose; - export import FileObjectsPage = FilesAPI.FileObjectsPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; +Files.FileObjectsPage = FileObjectsPage; + +export declare namespace Files { + export { + type FileContent as FileContent, + type FileDeleted as FileDeleted, + type FileObject as FileObject, + type FilePurpose as FilePurpose, + FileObjectsPage as FileObjectsPage, + type FileCreateParams as FileCreateParams, + type FileListParams as FileListParams, + }; } diff --git a/src/resources/fine-tuning/alpha.ts b/src/resources/fine-tuning/alpha.ts new file mode 100644 index 000000000..446b6431e --- /dev/null +++ b/src/resources/fine-tuning/alpha.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './alpha/index'; diff --git a/src/resources/fine-tuning/alpha/alpha.ts b/src/resources/fine-tuning/alpha/alpha.ts new file mode 100644 index 000000000..77d695195 --- /dev/null +++ b/src/resources/fine-tuning/alpha/alpha.ts @@ -0,0 +1,27 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as GradersAPI from './graders'; +import { + GraderRunParams, + GraderRunResponse, + GraderValidateParams, + GraderValidateResponse, + Graders, +} from './graders'; + +export class Alpha extends APIResource { + graders: GradersAPI.Graders = new GradersAPI.Graders(this._client); +} + +Alpha.Graders = Graders; + +export declare namespace Alpha { + export { + Graders as Graders, + type GraderRunResponse as GraderRunResponse, + type GraderValidateResponse as GraderValidateResponse, + type GraderRunParams as GraderRunParams, + type GraderValidateParams as GraderValidateParams, + }; +} diff --git a/src/resources/fine-tuning/alpha/graders.ts b/src/resources/fine-tuning/alpha/graders.ts new file mode 100644 index 000000000..a9ef57f71 --- /dev/null +++ b/src/resources/fine-tuning/alpha/graders.ts @@ -0,0 +1,168 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
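The two alpha endpoints added in this file are exercised end-to-end below — a minimal sketch distilled from the `@example` blocks that follow, assuming an initialized `OpenAI` client with `OPENAI_API_KEY` set; the grader name and the `{{...}}` template variables are illustrative placeholders, not values prescribed by the SDK.

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function main() {
  // A string_check grader: passes when input and reference compare equal.
  const grader = {
    name: 'exact-answer',
    type: 'string_check' as const,
    input: '{{sample.output_text}}',
    operation: 'eq' as const,
    reference: '{{item.reference_answer}}',
  };

  // Validate the grader definition first...
  const validated = await client.fineTuning.alpha.graders.validate({ grader });
  console.log('validated:', validated.grader?.name);

  // ...then run it against a single model sample.
  const result = await client.fineTuning.alpha.graders.run({
    grader,
    model_sample: 'Paris',
    reference_answer: 'Paris',
  });
  console.log('reward:', result.reward);
}

main();
```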
+ +import { APIResource } from '../../../resource'; +import * as Core from '../../../core'; +import * as GraderModelsAPI from '../../graders/grader-models'; + +export class Graders extends APIResource { + /** + * Run a grader. + * + * @example + * ```ts + * const response = await client.fineTuning.alpha.graders.run({ + * grader: { + * input: 'input', + * name: 'name', + * operation: 'eq', + * reference: 'reference', + * type: 'string_check', + * }, + * model_sample: 'model_sample', + * reference_answer: 'string', + * }); + * ``` + */ + run(body: GraderRunParams, options?: Core.RequestOptions): Core.APIPromise<GraderRunResponse> { + return this._client.post('/fine_tuning/alpha/graders/run', { body, ...options }); + } + + /** + * Validate a grader. + * + * @example + * ```ts + * const response = + * await client.fineTuning.alpha.graders.validate({ + * grader: { + * input: 'input', + * name: 'name', + * operation: 'eq', + * reference: 'reference', + * type: 'string_check', + * }, + * }); + * ``` + */ + validate( + body: GraderValidateParams, + options?: Core.RequestOptions, + ): Core.APIPromise<GraderValidateResponse> { + return this._client.post('/fine_tuning/alpha/graders/validate', { body, ...options }); + } +} + +export interface GraderRunResponse { + metadata: GraderRunResponse.Metadata; + + model_grader_token_usage_per_model: Record<string, unknown>; + + reward: number; + + sub_rewards: Record<string, unknown>; +} + +export namespace GraderRunResponse { + export interface Metadata { + errors: Metadata.Errors; + + execution_time: number; + + name: string; + + sampled_model_name: string | null; + + scores: Record<string, unknown>; + + token_usage: number | null; + + type: string; + } + + export namespace Metadata { + export interface Errors { + formula_parse_error: boolean; + + invalid_variable_error: boolean; + + model_grader_parse_error: boolean; + + model_grader_refusal_error: boolean; + + model_grader_server_error: boolean; + + model_grader_server_error_details: string | null; + + other_error: boolean; + + python_grader_runtime_error: boolean; + + python_grader_runtime_error_details: string | null; + + python_grader_server_error: boolean; + + python_grader_server_error_type: string | null; + + sample_parse_error: boolean; + + truncated_observation_error: boolean; + + unresponsive_reward_error: boolean; + } + } +} + +export interface GraderValidateResponse { + /** + * The grader used for the fine-tuning job. + */ + grader?: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; +} + +export interface GraderRunParams { + /** + * The grader used for the fine-tuning job. + */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; + + /** + * The model sample to be evaluated. + */ + model_sample: string; + + /** + * The reference answer for the evaluation. + */ + reference_answer: string | unknown | Array<unknown> | number; +} + +export interface GraderValidateParams { + /** + * The grader used for the fine-tuning job.
+ */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; +} + +export declare namespace Graders { + export { + type GraderRunResponse as GraderRunResponse, + type GraderValidateResponse as GraderValidateResponse, + type GraderRunParams as GraderRunParams, + type GraderValidateParams as GraderValidateParams, + }; +} diff --git a/src/resources/fine-tuning/alpha/index.ts b/src/resources/fine-tuning/alpha/index.ts new file mode 100644 index 000000000..47b229bc3 --- /dev/null +++ b/src/resources/fine-tuning/alpha/index.ts @@ -0,0 +1,10 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { Alpha } from './alpha'; +export { + Graders, + type GraderRunResponse, + type GraderValidateResponse, + type GraderRunParams, + type GraderValidateParams, +} from './graders'; diff --git a/src/resources/fine-tuning/checkpoints.ts b/src/resources/fine-tuning/checkpoints.ts new file mode 100644 index 000000000..eb09063f6 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './checkpoints/index'; diff --git a/src/resources/fine-tuning/checkpoints/checkpoints.ts b/src/resources/fine-tuning/checkpoints/checkpoints.ts new file mode 100644 index 000000000..08422aa64 --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/checkpoints.ts @@ -0,0 +1,32 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import * as PermissionsAPI from './permissions'; +import { + PermissionCreateParams, + PermissionCreateResponse, + PermissionCreateResponsesPage, + PermissionDeleteResponse, + PermissionRetrieveParams, + PermissionRetrieveResponse, + Permissions, +} from './permissions'; + +export class Checkpoints extends APIResource { + permissions: PermissionsAPI.Permissions = new PermissionsAPI.Permissions(this._client); +} + +Checkpoints.Permissions = Permissions; +Checkpoints.PermissionCreateResponsesPage = PermissionCreateResponsesPage; + +export declare namespace Checkpoints { + export { + Permissions as Permissions, + type PermissionCreateResponse as PermissionCreateResponse, + type PermissionRetrieveResponse as PermissionRetrieveResponse, + type PermissionDeleteResponse as PermissionDeleteResponse, + PermissionCreateResponsesPage as PermissionCreateResponsesPage, + type PermissionCreateParams as PermissionCreateParams, + type PermissionRetrieveParams as PermissionRetrieveParams, + }; +} diff --git a/src/resources/fine-tuning/checkpoints/index.ts b/src/resources/fine-tuning/checkpoints/index.ts new file mode 100644 index 000000000..51d1af9cf --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/index.ts @@ -0,0 +1,12 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
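The permissions resource that follows supports a full grant/list/revoke lifecycle. A minimal sketch based on the method signatures and `@example` blocks in `permissions.ts` below, assuming a client configured with an admin API key; the checkpoint and project IDs are placeholders.

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // must be configured with an admin API key
const checkpoint = 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd'; // placeholder

async function main() {
  // Grant a project access to the checkpoint; the result is a paginated list.
  for await (const granted of client.fineTuning.checkpoints.permissions.create(checkpoint, {
    project_ids: ['proj_abc123'], // placeholder project ID
  })) {
    console.log('granted', granted.id, 'to', granted.project_id);
  }

  // Inspect, then revoke, every permission on the checkpoint.
  const permissions = await client.fineTuning.checkpoints.permissions.retrieve(checkpoint);
  for (const permission of permissions.data) {
    const deleted = await client.fineTuning.checkpoints.permissions.del(checkpoint, permission.id);
    console.log('revoked', deleted.id, deleted.deleted);
  }
}

main();
```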
+ +export { Checkpoints } from './checkpoints'; +export { + PermissionCreateResponsesPage, + Permissions, + type PermissionCreateResponse, + type PermissionRetrieveResponse, + type PermissionDeleteResponse, + type PermissionCreateParams, + type PermissionRetrieveParams, +} from './permissions'; diff --git a/src/resources/fine-tuning/checkpoints/permissions.ts b/src/resources/fine-tuning/checkpoints/permissions.ts new file mode 100644 index 000000000..dc25bab7f --- /dev/null +++ b/src/resources/fine-tuning/checkpoints/permissions.ts @@ -0,0 +1,230 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../../resource'; +import { isRequestOptions } from '../../../core'; +import * as Core from '../../../core'; +import { Page } from '../../../pagination'; + +export class Permissions extends APIResource { + /** + * **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys). + * + * This enables organization owners to share fine-tuned models with other projects + * in their organization. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const permissionCreateResponse of client.fineTuning.checkpoints.permissions.create( + * 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + * { project_ids: ['string'] }, + * )) { + * // ... + * } + * ``` + */ + create( + fineTunedModelCheckpoint: string, + body: PermissionCreateParams, + options?: Core.RequestOptions, + ): Core.PagePromise<PermissionCreateResponsesPage, PermissionCreateResponse> { + return this._client.getAPIList( + `/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, + PermissionCreateResponsesPage, + { body, method: 'post', ...options }, + ); + } + + /** + * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + * + * Organization owners can use this endpoint to view all permissions for a + * fine-tuned model checkpoint. + * + * @example + * ```ts + * const permission = + * await client.fineTuning.checkpoints.permissions.retrieve( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` + */ + retrieve( + fineTunedModelCheckpoint: string, + query?: PermissionRetrieveParams, + options?: Core.RequestOptions, + ): Core.APIPromise<PermissionRetrieveResponse>; + retrieve( + fineTunedModelCheckpoint: string, + options?: Core.RequestOptions, + ): Core.APIPromise<PermissionRetrieveResponse>; + retrieve( + fineTunedModelCheckpoint: string, + query: PermissionRetrieveParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise<PermissionRetrieveResponse> { + if (isRequestOptions(query)) { + return this.retrieve(fineTunedModelCheckpoint, {}, query); + } + return this._client.get(`/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions`, { + query, + ...options, + }); + } + + /** + * **NOTE:** This endpoint requires an [admin API key](../admin-api-keys). + * + * Organization owners can use this endpoint to delete a permission for a + * fine-tuned model checkpoint. + * + * @example + * ```ts + * const permission = + * await client.fineTuning.checkpoints.permissions.del( + * 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + * 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', + * ); + * ``` + */ + del( + fineTunedModelCheckpoint: string, + permissionId: string, + options?: Core.RequestOptions, + ): Core.APIPromise<PermissionDeleteResponse> { + return this._client.delete( + `/fine_tuning/checkpoints/${fineTunedModelCheckpoint}/permissions/${permissionId}`, + options, + ); + } +} + +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility.
+ */ +export class PermissionCreateResponsesPage extends Page<PermissionCreateResponse> {} + +/** + * The `checkpoint.permission` object represents a permission for a fine-tuned + * model checkpoint. + */ +export interface PermissionCreateResponse { + /** + * The permission identifier, which can be referenced in the API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the permission was created. + */ + created_at: number; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; + + /** + * The project identifier that the permission is for. + */ + project_id: string; +} + +export interface PermissionRetrieveResponse { + data: Array<PermissionRetrieveResponse.Data>; + + has_more: boolean; + + object: 'list'; + + first_id?: string | null; + + last_id?: string | null; +} + +export namespace PermissionRetrieveResponse { + /** + * The `checkpoint.permission` object represents a permission for a fine-tuned + * model checkpoint. + */ + export interface Data { + /** + * The permission identifier, which can be referenced in the API endpoints. + */ + id: string; + + /** + * The Unix timestamp (in seconds) for when the permission was created. + */ + created_at: number; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; + + /** + * The project identifier that the permission is for. + */ + project_id: string; + } +} + +export interface PermissionDeleteResponse { + /** + * The ID of the fine-tuned model checkpoint permission that was deleted. + */ + id: string; + + /** + * Whether the fine-tuned model checkpoint permission was successfully deleted. + */ + deleted: boolean; + + /** + * The object type, which is always "checkpoint.permission". + */ + object: 'checkpoint.permission'; +} + +export interface PermissionCreateParams { + /** + * The project identifiers to grant access to. + */ + project_ids: Array<string>; +} + +export interface PermissionRetrieveParams { + /** + * Identifier for the last permission ID from the previous pagination request. + */ + after?: string; + + /** + * Number of permissions to retrieve. + */ + limit?: number; + + /** + * The order in which to retrieve permissions. + */ + order?: 'ascending' | 'descending'; + + /** + * The ID of the project to get permissions for. + */ + project_id?: string; +} + +Permissions.PermissionCreateResponsesPage = PermissionCreateResponsesPage; + +export declare namespace Permissions { + export { + type PermissionCreateResponse as PermissionCreateResponse, + type PermissionRetrieveResponse as PermissionRetrieveResponse, + type PermissionDeleteResponse as PermissionDeleteResponse, + PermissionCreateResponsesPage as PermissionCreateResponsesPage, + type PermissionCreateParams as PermissionCreateParams, + type PermissionRetrieveParams as PermissionRetrieveParams, + }; +} diff --git a/src/resources/fine-tuning/fine-tuning.ts b/src/resources/fine-tuning/fine-tuning.ts index b1ba34ecf..8fb54983b 100644 --- a/src/resources/fine-tuning/fine-tuning.ts +++ b/src/resources/fine-tuning/fine-tuning.ts @@ -1,22 +1,75 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
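The hunk below rewires the `FineTuning` root resource. After this change the sub-resources hang off the client as sketched here (an illustrative sketch, assuming an initialized client):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // jobs: create/retrieve/list/cancel, plus the new pause/resume endpoints.
  const jobs = await client.fineTuning.jobs.list({ limit: 5 });
  console.log(jobs.data.map((job) => job.id));

  // checkpoints.permissions: checkpoint sharing across projects (admin key).
  // alpha.graders: run/validate graders ahead of a reinforcement job.
  // methods: carries only the Dpo/Reinforcement/Supervised method types.
  console.log(
    typeof client.fineTuning.checkpoints.permissions,
    typeof client.fineTuning.alpha.graders,
    typeof client.fineTuning.methods,
  );
}

main();
```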
import { APIResource } from '../../resource'; +import * as MethodsAPI from './methods'; +import { + DpoHyperparameters, + DpoMethod, + Methods, + ReinforcementHyperparameters, + ReinforcementMethod, + SupervisedHyperparameters, + SupervisedMethod, +} from './methods'; +import * as AlphaAPI from './alpha/alpha'; +import { Alpha } from './alpha/alpha'; +import * as CheckpointsAPI from './checkpoints/checkpoints'; +import { Checkpoints } from './checkpoints/checkpoints'; import * as JobsAPI from './jobs/jobs'; +import { + FineTuningJob, + FineTuningJobEvent, + FineTuningJobEventsPage, + FineTuningJobIntegration, + FineTuningJobWandbIntegration, + FineTuningJobWandbIntegrationObject, + FineTuningJobsPage, + JobCreateParams, + JobListEventsParams, + JobListParams, + Jobs, +} from './jobs/jobs'; export class FineTuning extends APIResource { + methods: MethodsAPI.Methods = new MethodsAPI.Methods(this._client); jobs: JobsAPI.Jobs = new JobsAPI.Jobs(this._client); + checkpoints: CheckpointsAPI.Checkpoints = new CheckpointsAPI.Checkpoints(this._client); + alpha: AlphaAPI.Alpha = new AlphaAPI.Alpha(this._client); } -export namespace FineTuning { - export import Jobs = JobsAPI.Jobs; - export import FineTuningJob = JobsAPI.FineTuningJob; - export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent; - export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration; - export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration; - export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject; - export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage; - export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage; - export import JobCreateParams = JobsAPI.JobCreateParams; - export import JobListParams = JobsAPI.JobListParams; - export import JobListEventsParams = JobsAPI.JobListEventsParams; +FineTuning.Methods = Methods; +FineTuning.Jobs = Jobs; +FineTuning.FineTuningJobsPage = FineTuningJobsPage; +FineTuning.FineTuningJobEventsPage = FineTuningJobEventsPage; +FineTuning.Checkpoints = Checkpoints; +FineTuning.Alpha = Alpha; + +export declare namespace FineTuning { + export { + Methods as Methods, + type DpoHyperparameters as DpoHyperparameters, + type DpoMethod as DpoMethod, + type ReinforcementHyperparameters as ReinforcementHyperparameters, + type ReinforcementMethod as ReinforcementMethod, + type SupervisedHyperparameters as SupervisedHyperparameters, + type SupervisedMethod as SupervisedMethod, + }; + + export { + Jobs as Jobs, + type FineTuningJob as FineTuningJob, + type FineTuningJobEvent as FineTuningJobEvent, + type FineTuningJobIntegration as FineTuningJobIntegration, + type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, + FineTuningJobsPage as FineTuningJobsPage, + FineTuningJobEventsPage as FineTuningJobEventsPage, + type JobCreateParams as JobCreateParams, + type JobListParams as JobListParams, + type JobListEventsParams as JobListEventsParams, + }; + + export { Checkpoints as Checkpoints }; + + export { Alpha as Alpha }; } diff --git a/src/resources/fine-tuning/index.ts b/src/resources/fine-tuning/index.ts index 1d8739a0a..878ac402d 100644 --- a/src/resources/fine-tuning/index.ts +++ b/src/resources/fine-tuning/index.ts @@ -1,16 +1,27 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
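With the method types now exported from the `FineTuning` namespace above, job creation can pass hyperparameters under the new `method` parameter (the top-level `hyperparameters` field is deprecated later in this diff). A minimal sketch; the training file ID is a placeholder and the `method` shape follows `JobCreateParams.Method` as defined further down.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // Prefer `method` over the deprecated top-level `hyperparameters`.
  const job = await client.fineTuning.jobs.create({
    model: 'gpt-4o-mini',
    training_file: 'file-abc123', // placeholder file ID
    method: {
      type: 'supervised',
      supervised: {
        hyperparameters: { n_epochs: 3, batch_size: 'auto' },
      },
    },
  });
  console.log(job.id, job.status);
}

main();
```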
+export { Alpha } from './alpha/index'; +export { Checkpoints } from './checkpoints/index'; export { FineTuning } from './fine-tuning'; export { - FineTuningJob, - FineTuningJobEvent, - FineTuningJobIntegration, - FineTuningJobWandbIntegration, - FineTuningJobWandbIntegrationObject, - JobCreateParams, - JobListParams, - JobListEventsParams, FineTuningJobsPage, FineTuningJobEventsPage, Jobs, + type FineTuningJob, + type FineTuningJobEvent, + type FineTuningJobIntegration, + type FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject, + type JobCreateParams, + type JobListParams, + type JobListEventsParams, } from './jobs/index'; +export { + Methods, + type DpoHyperparameters, + type DpoMethod, + type ReinforcementHyperparameters, + type ReinforcementMethod, + type SupervisedHyperparameters, + type SupervisedMethod, +} from './methods'; diff --git a/src/resources/fine-tuning/jobs/checkpoints.ts b/src/resources/fine-tuning/jobs/checkpoints.ts index 02896b26d..10902e715 100644 --- a/src/resources/fine-tuning/jobs/checkpoints.ts +++ b/src/resources/fine-tuning/jobs/checkpoints.ts @@ -3,12 +3,21 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as CheckpointsAPI from './checkpoints'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Checkpoints extends APIResource { /** * List checkpoints for a fine-tuning job. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const fineTuningJobCheckpoint of client.fineTuning.jobs.checkpoints.list( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * )) { + * // ... + * } + * ``` */ list( fineTuningJobId: string, @@ -101,8 +110,12 @@ export namespace FineTuningJobCheckpoint { export interface CheckpointListParams extends CursorPageParams {} -export namespace Checkpoints { - export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint; - export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage; - export import CheckpointListParams = CheckpointsAPI.CheckpointListParams; +Checkpoints.FineTuningJobCheckpointsPage = FineTuningJobCheckpointsPage; + +export declare namespace Checkpoints { + export { + type FineTuningJobCheckpoint as FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage as FineTuningJobCheckpointsPage, + type CheckpointListParams as CheckpointListParams, + }; } diff --git a/src/resources/fine-tuning/jobs/index.ts b/src/resources/fine-tuning/jobs/index.ts index 275c776e9..7a05b48b2 100644 --- a/src/resources/fine-tuning/jobs/index.ts +++ b/src/resources/fine-tuning/jobs/index.ts @@ -1,21 +1,21 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
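The checkpoint-listing example above combines with the `pause`/`resume` endpoints added to `jobs.ts` below. A sketch with a placeholder job ID; `for await` drives the SDK's cursor pagination and fetches further pages transparently.

```ts
import OpenAI from 'openai';

const client = new OpenAI();
const jobId = 'ft-AF1WoRqd3aJAHsqc9NY7iL8F'; // placeholder job ID from the examples

async function main() {
  // Iterate all checkpoints for the job across pages.
  for await (const checkpoint of client.fineTuning.jobs.checkpoints.list(jobId)) {
    console.log(checkpoint.id);
  }

  // The new endpoints act on a running job.
  await client.fineTuning.jobs.pause(jobId);
  await client.fineTuning.jobs.resume(jobId);
}

main();
```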
export { - FineTuningJob, - FineTuningJobEvent, - FineTuningJobIntegration, - FineTuningJobWandbIntegration, - FineTuningJobWandbIntegrationObject, - JobCreateParams, - JobListParams, - JobListEventsParams, + FineTuningJobCheckpointsPage, + Checkpoints, + type FineTuningJobCheckpoint, + type CheckpointListParams, +} from './checkpoints'; +export { FineTuningJobsPage, FineTuningJobEventsPage, Jobs, + type FineTuningJob, + type FineTuningJobEvent, + type FineTuningJobIntegration, + type FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject, + type JobCreateParams, + type JobListParams, + type JobListEventsParams, } from './jobs'; -export { - FineTuningJobCheckpoint, - CheckpointListParams, - FineTuningJobCheckpointsPage, - Checkpoints, -} from './checkpoints'; diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts index 54b5c4e6a..cc5f55e9a 100644 --- a/src/resources/fine-tuning/jobs/jobs.ts +++ b/src/resources/fine-tuning/jobs/jobs.ts @@ -3,8 +3,14 @@ import { APIResource } from '../../../resource'; import { isRequestOptions } from '../../../core'; import * as Core from '../../../core'; -import * as JobsAPI from './jobs'; +import * as MethodsAPI from '../methods'; import * as CheckpointsAPI from './checkpoints'; +import { + CheckpointListParams, + Checkpoints, + FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage, +} from './checkpoints'; import { CursorPage, type CursorPageParams } from '../../../pagination'; export class Jobs extends APIResource { @@ -18,6 +24,14 @@ * of the fine-tuned models once complete. * * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.create({ + * model: 'gpt-4o-mini', + * training_file: 'file-abc123', + * }); + * ``` */ create(body: JobCreateParams, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> { return this._client.post('/fine_tuning/jobs', { body, ...options }); } @@ -27,6 +41,13 @@ * Get info about a fine-tuning job. * * [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.retrieve( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` */ retrieve(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> { return this._client.get(`/fine_tuning/jobs/${fineTuningJobId}`, options); } @@ -34,6 +55,14 @@ /** * List your organization's fine-tuning jobs + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const fineTuningJob of client.fineTuning.jobs.list()) { + * // ... + * } + * ``` */ list( query?: JobListParams, @@ -52,6 +81,13 @@ /** * Immediately cancel a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.cancel( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` */ cancel(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> { return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/cancel`, options); } @@ -59,6 +95,16 @@ /** * Get status updates for a fine-tuning job. + * + * @example + * ```ts + * // Automatically fetches more pages as needed.
+ * for await (const fineTuningJobEvent of client.fineTuning.jobs.listEvents( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * )) { + * // ... + * } + * ``` */ listEvents( fineTuningJobId: string, @@ -82,6 +128,34 @@ export class Jobs extends APIResource { ...options, }); } + + /** + * Pause a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.pause( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` + */ + pause(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> { + return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/pause`, options); + } + + /** + * Resume a fine-tune job. + * + * @example + * ```ts + * const fineTuningJob = await client.fineTuning.jobs.resume( + * 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + * ); + * ``` + */ + resume(fineTuningJobId: string, options?: Core.RequestOptions): Core.APIPromise<FineTuningJob> { + return this._client.post(`/fine_tuning/jobs/${fineTuningJobId}/resume`, options); + } } export class FineTuningJobsPage extends CursorPage<FineTuningJob> {} @@ -122,9 +196,8 @@ export interface FineTuningJob { finished_at: number | null; /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for - * more details. + * The hyperparameters used for the fine-tuning job. This value will only be + * returned when running `supervised` jobs. */ hyperparameters: FineTuningJob.Hyperparameters; @@ -190,6 +263,11 @@ export interface FineTuningJob { * A list of integrations to enable for this fine-tuning job. */ integrations?: Array<FineTuningJobWandbIntegrationObject> | null; + + /** + * The method used for fine-tuning. + */ + method?: FineTuningJob.Method; } export namespace FineTuningJob { @@ -216,18 +294,52 @@ } /** - * The hyperparameters used for the fine-tuning job. See the - * [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for - * more details. + * The hyperparameters used for the fine-tuning job. This value will only be + * returned when running `supervised` jobs. */ export interface Hyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: unknown | 'auto' | number | null; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + /** * The number of epochs to train the model for. An epoch refers to one full cycle - * through the training dataset. "auto" decides the optimal number of epochs based - * on the size of the dataset. If setting the number manually, we support any - * number between 1 and 50 epochs. + * through the training dataset. */ - n_epochs: 'auto' | number; + n_epochs?: 'auto' | number; + } + + /** + * The method used for fine-tuning. + */ + export interface Method { + /** + * The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + */ + type: 'supervised' | 'dpo' | 'reinforcement'; + + /** + * Configuration for the DPO fine-tuning method. + */ + dpo?: MethodsAPI.DpoMethod; + + /** + * Configuration for the reinforcement fine-tuning method. + */ + reinforcement?: MethodsAPI.ReinforcementMethod; + + /** + * Configuration for the supervised fine-tuning method. + */ + supervised?: MethodsAPI.SupervisedMethod; } } @@ -235,15 +347,40 @@ * Fine-tuning job event object */ export interface FineTuningJobEvent { + /** + * The object identifier.
+ */ id: string; + /** + * The Unix timestamp (in seconds) for when the fine-tuning job was created. + */ created_at: number; + /** + * The log level of the event. + */ level: 'info' | 'warn' | 'error'; + /** + * The message of the event. + */ message: string; + /** + * The object type, which is always "fine_tuning.job.event". + */ object: 'fine_tuning.job.event'; + + /** + * The data associated with the event. + */ + data?: unknown; + + /** + * The type of event. + */ + type?: 'message' | 'metrics'; } export type FineTuningJobIntegration = FineTuningJobWandbIntegrationObject; @@ -299,7 +436,7 @@ export interface FineTuningJobWandbIntegrationObject { export interface JobCreateParams { /** * The name of the model to fine-tune. You can select one of the - * [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + * [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). */ model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo' | 'gpt-4o-mini'; /** @@ -313,8 +450,10 @@ * your file with the purpose `fine-tune`. * * The contents of the file should differ depending on if the model uses the - * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + * [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), * [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + * format, or if the fine-tuning method uses the + * [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) * format. * * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) @@ -323,7 +462,9 @@ training_file: string; /** - * The hyperparameters used for the fine-tuning job. + * @deprecated The hyperparameters used for the fine-tuning job. This value is now + * deprecated in favor of `method`, and should be passed in under the `method` + * parameter. */ hyperparameters?: JobCreateParams.Hyperparameters; /** @@ -332,6 +473,11 @@ */ integrations?: Array<JobCreateParams.Integration> | null; + /** + * The method used for fine-tuning. + */ + method?: JobCreateParams.Method; + /** * The seed controls the reproducibility of the job. Passing in the same seed and * job parameters should produce the same results, but may differ in rare cases. If @@ -367,7 +513,9 @@ export namespace JobCreateParams { /** - * The hyperparameters used for the fine-tuning job. + * @deprecated The hyperparameters used for the fine-tuning job. This value is now + * deprecated in favor of `method`, and should be passed in under the `method` + * parameter. */ export interface Hyperparameters { /** @@ -439,25 +587,60 @@ tags?: Array<string>; } } + + /** + * The method used for fine-tuning. + */ + export interface Method { + /** + * The type of method. Is either `supervised`, `dpo`, or `reinforcement`. + */ + type: 'supervised' | 'dpo' | 'reinforcement'; + + /** + * Configuration for the DPO fine-tuning method. + */ + dpo?: MethodsAPI.DpoMethod; + + /** + * Configuration for the reinforcement fine-tuning method. + */ + reinforcement?: MethodsAPI.ReinforcementMethod; + + /** + * Configuration for the supervised fine-tuning method.
+ */ + supervised?: MethodsAPI.SupervisedMethod; + } } export interface JobListParams extends CursorPageParams {} export interface JobListEventsParams extends CursorPageParams {} -export namespace Jobs { - export import FineTuningJob = JobsAPI.FineTuningJob; - export import FineTuningJobEvent = JobsAPI.FineTuningJobEvent; - export import FineTuningJobIntegration = JobsAPI.FineTuningJobIntegration; - export import FineTuningJobWandbIntegration = JobsAPI.FineTuningJobWandbIntegration; - export import FineTuningJobWandbIntegrationObject = JobsAPI.FineTuningJobWandbIntegrationObject; - export import FineTuningJobsPage = JobsAPI.FineTuningJobsPage; - export import FineTuningJobEventsPage = JobsAPI.FineTuningJobEventsPage; - export import JobCreateParams = JobsAPI.JobCreateParams; - export import JobListParams = JobsAPI.JobListParams; - export import JobListEventsParams = JobsAPI.JobListEventsParams; - export import Checkpoints = CheckpointsAPI.Checkpoints; - export import FineTuningJobCheckpoint = CheckpointsAPI.FineTuningJobCheckpoint; - export import FineTuningJobCheckpointsPage = CheckpointsAPI.FineTuningJobCheckpointsPage; - export import CheckpointListParams = CheckpointsAPI.CheckpointListParams; +Jobs.FineTuningJobsPage = FineTuningJobsPage; +Jobs.FineTuningJobEventsPage = FineTuningJobEventsPage; +Jobs.Checkpoints = Checkpoints; +Jobs.FineTuningJobCheckpointsPage = FineTuningJobCheckpointsPage; + +export declare namespace Jobs { + export { + type FineTuningJob as FineTuningJob, + type FineTuningJobEvent as FineTuningJobEvent, + type FineTuningJobIntegration as FineTuningJobIntegration, + type FineTuningJobWandbIntegration as FineTuningJobWandbIntegration, + type FineTuningJobWandbIntegrationObject as FineTuningJobWandbIntegrationObject, + FineTuningJobsPage as FineTuningJobsPage, + FineTuningJobEventsPage as FineTuningJobEventsPage, + type JobCreateParams as JobCreateParams, + type JobListParams as JobListParams, + type JobListEventsParams as JobListEventsParams, + }; + + export { + Checkpoints as Checkpoints, + type FineTuningJobCheckpoint as FineTuningJobCheckpoint, + FineTuningJobCheckpointsPage as FineTuningJobCheckpointsPage, + type CheckpointListParams as CheckpointListParams, + }; } diff --git a/src/resources/fine-tuning/methods.ts b/src/resources/fine-tuning/methods.ts new file mode 100644 index 000000000..aa459c74c --- /dev/null +++ b/src/resources/fine-tuning/methods.ts @@ -0,0 +1,152 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as GraderModelsAPI from '../graders/grader-models'; + +export class Methods extends APIResource {} + +/** + * The hyperparameters used for the DPO fine-tuning job. + */ +export interface DpoHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * The beta value for the DPO method. A higher beta value will increase the weight + * of the penalty between the policy and reference model. + */ + beta?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; +} + +/** + * Configuration for the DPO fine-tuning method. 
+ */ +export interface DpoMethod { + /** + * The hyperparameters used for the DPO fine-tuning job. + */ + hyperparameters?: DpoHyperparameters; +} + +/** + * The hyperparameters used for the reinforcement fine-tuning job. + */ +export interface ReinforcementHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Multiplier on amount of compute used for exploring search space during training. + */ + compute_multiplier?: 'auto' | number; + + /** + * The number of training steps between evaluation runs. + */ + eval_interval?: 'auto' | number; + + /** + * Number of evaluation samples to generate per training step. + */ + eval_samples?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; + + /** + * Level of reasoning effort. + */ + reasoning_effort?: 'default' | 'low' | 'medium' | 'high'; +} + +/** + * Configuration for the reinforcement fine-tuning method. + */ +export interface ReinforcementMethod { + /** + * The grader used for the fine-tuning job. + */ + grader: + | GraderModelsAPI.StringCheckGrader + | GraderModelsAPI.TextSimilarityGrader + | GraderModelsAPI.PythonGrader + | GraderModelsAPI.ScoreModelGrader + | GraderModelsAPI.MultiGrader; + + /** + * The hyperparameters used for the reinforcement fine-tuning job. + */ + hyperparameters?: ReinforcementHyperparameters; +} + +/** + * The hyperparameters used for the fine-tuning job. + */ +export interface SupervisedHyperparameters { + /** + * Number of examples in each batch. A larger batch size means that model + * parameters are updated less frequently, but with lower variance. + */ + batch_size?: 'auto' | number; + + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to + * avoid overfitting. + */ + learning_rate_multiplier?: 'auto' | number; + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + n_epochs?: 'auto' | number; +} + +/** + * Configuration for the supervised fine-tuning method. + */ +export interface SupervisedMethod { + /** + * The hyperparameters used for the fine-tuning job. + */ + hyperparameters?: SupervisedHyperparameters; +} + +export declare namespace Methods { + export { + type DpoHyperparameters as DpoHyperparameters, + type DpoMethod as DpoMethod, + type ReinforcementHyperparameters as ReinforcementHyperparameters, + type ReinforcementMethod as ReinforcementMethod, + type SupervisedHyperparameters as SupervisedHyperparameters, + type SupervisedMethod as SupervisedMethod, + }; +} diff --git a/src/resources/graders.ts b/src/resources/graders.ts new file mode 100644 index 000000000..2ea9aa959 --- /dev/null +++ b/src/resources/graders.ts @@ -0,0 +1,3 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export * from './graders/index'; diff --git a/src/resources/graders/grader-models.ts b/src/resources/graders/grader-models.ts new file mode 100644 index 000000000..d2c335300 --- /dev/null +++ b/src/resources/graders/grader-models.ts @@ -0,0 +1,296 @@ +// File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import * as ResponsesAPI from '../responses/responses'; + +export class GraderModels extends APIResource {} + +/** + * A LabelModelGrader object which uses a model to assign labels to each item in + * the evaluation. + */ +export interface LabelModelGrader { + input: Array<LabelModelGrader.Input>; + + /** + * The labels to assign to each item in the evaluation. + */ + labels: Array<string>; + + /** + * The model to use for the evaluation. Must support structured outputs. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The labels that indicate a passing result. Must be a subset of labels. + */ + passing_labels: Array<string>; + + /** + * The object type, which is always `label_model`. + */ + type: 'label_model'; +} + +export namespace LabelModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A MultiGrader object combines the output of multiple graders to produce a single + * score. + */ +export interface MultiGrader { + /** + * A formula to calculate the output based on grader results. + */ + calculate_output: string; + + graders: Record< + string, + StringCheckGrader | TextSimilarityGrader | PythonGrader | ScoreModelGrader | LabelModelGrader + >; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `multi`. + */ + type: 'multi'; +} + +/** + * A PythonGrader object that runs a python script on the input. + */ +export interface PythonGrader { + /** + * The name of the grader. + */ + name: string; + + /** + * The source code of the python script. + */ + source: string; + + /** + * The object type, which is always `python`. + */ + type: 'python'; + + /** + * The image tag to use for the python script. + */ + image_tag?: string; +} + +/** + * A ScoreModelGrader object that uses a model to assign a score to the input. + */ +export interface ScoreModelGrader { + /** + * The input text. This may include template strings. + */ + input: Array<ScoreModelGrader.Input>; + + /** + * The model to use for the evaluation. + */ + model: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The object type, which is always `score_model`. + */ + type: 'score_model'; + + /** + * The range of the score. Defaults to `[0, 1]`. + */ + range?: Array<number>; + + /** + * The sampling parameters for the model. + */ + sampling_params?: unknown; +} + +export namespace ScoreModelGrader { + /** + * A message input to the model with a role indicating instruction following + * hierarchy.
Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ + export interface Input { + /** + * Text inputs to the model - can contain template strings. + */ + content: string | ResponsesAPI.ResponseInputText | Input.OutputText; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; + } + + export namespace Input { + /** + * A text output from the model. + */ + export interface OutputText { + /** + * The text output from the model. + */ + text: string; + + /** + * The type of the output text. Always `output_text`. + */ + type: 'output_text'; + } + } +} + +/** + * A StringCheckGrader object that performs a string comparison between input and + * reference using a specified operation. + */ +export interface StringCheckGrader { + /** + * The input text. This may include template strings. + */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`. + */ + operation: 'eq' | 'ne' | 'like' | 'ilike'; + + /** + * The reference text. This may include template strings. + */ + reference: string; + + /** + * The object type, which is always `string_check`. + */ + type: 'string_check'; +} + +/** + * A TextSimilarityGrader object which grades text based on similarity metrics. + */ +export interface TextSimilarityGrader { + /** + * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, + * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. + */ + evaluation_metric: + | 'fuzzy_match' + | 'bleu' + | 'gleu' + | 'meteor' + | 'rouge_1' + | 'rouge_2' + | 'rouge_3' + | 'rouge_4' + | 'rouge_5' + | 'rouge_l'; + + /** + * The text being graded. + */ + input: string; + + /** + * The name of the grader. + */ + name: string; + + /** + * The text being graded against. + */ + reference: string; + + /** + * The type of grader. + */ + type: 'text_similarity'; +} + +export declare namespace GraderModels { + export { + type LabelModelGrader as LabelModelGrader, + type MultiGrader as MultiGrader, + type PythonGrader as PythonGrader, + type ScoreModelGrader as ScoreModelGrader, + type StringCheckGrader as StringCheckGrader, + type TextSimilarityGrader as TextSimilarityGrader, + }; +} diff --git a/src/resources/graders/graders.ts b/src/resources/graders/graders.ts new file mode 100644 index 000000000..de3297450 --- /dev/null +++ b/src/resources/graders/graders.ts @@ -0,0 +1,31 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
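The grader interfaces defined above are plain data shapes, so they compose without any client at all. A type-only sketch combining two of them under a `multi` grader — the names, `{{...}}` template variables, and output formula are illustrative, and the deep import path assumes the SDK's `openai/resources/*` subpath exports.

```ts
import type {
  MultiGrader,
  StringCheckGrader,
  TextSimilarityGrader,
} from 'openai/resources/graders/grader-models';

// Fuzzy-similarity score against the reference answer.
export const fuzzy: TextSimilarityGrader = {
  name: 'fuzzy',
  type: 'text_similarity',
  evaluation_metric: 'fuzzy_match',
  input: '{{sample.output_text}}', // illustrative template variables
  reference: '{{item.reference_answer}}',
};

// Exact-match check on the same fields.
export const exact: StringCheckGrader = {
  name: 'exact',
  type: 'string_check',
  operation: 'eq',
  input: '{{sample.output_text}}',
  reference: '{{item.reference_answer}}',
};

// Blend both sub-grader results via a formula over their names.
export const combined: MultiGrader = {
  name: 'combined',
  type: 'multi',
  graders: { exact, fuzzy },
  calculate_output: '0.5 * exact + 0.5 * fuzzy', // illustrative formula
};
```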
+ +import { APIResource } from '../../resource'; +import * as GraderModelsAPI from './grader-models'; +import { + GraderModels, + LabelModelGrader, + MultiGrader, + PythonGrader, + ScoreModelGrader, + StringCheckGrader, + TextSimilarityGrader, +} from './grader-models'; + +export class Graders extends APIResource { + graderModels: GraderModelsAPI.GraderModels = new GraderModelsAPI.GraderModels(this._client); +} + +Graders.GraderModels = GraderModels; + +export declare namespace Graders { + export { + GraderModels as GraderModels, + type LabelModelGrader as LabelModelGrader, + type MultiGrader as MultiGrader, + type PythonGrader as PythonGrader, + type ScoreModelGrader as ScoreModelGrader, + type StringCheckGrader as StringCheckGrader, + type TextSimilarityGrader as TextSimilarityGrader, + }; +} diff --git a/src/resources/graders/index.ts b/src/resources/graders/index.ts new file mode 100644 index 000000000..82d557a6a --- /dev/null +++ b/src/resources/graders/index.ts @@ -0,0 +1,12 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + GraderModels, + type LabelModelGrader, + type MultiGrader, + type PythonGrader, + type ScoreModelGrader, + type StringCheckGrader, + type TextSimilarityGrader, +} from './grader-models'; +export { Graders } from './graders'; diff --git a/src/resources/images.ts b/src/resources/images.ts index fdd0b8881..c6b14833a 100644 --- a/src/resources/images.ts +++ b/src/resources/images.ts @@ -2,11 +2,17 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ImagesAPI from './images'; export class Images extends APIResource { /** - * Creates a variation of a given image. + * Creates a variation of a given image. This endpoint only supports `dall-e-2`. + * + * @example + * ```ts + * const imagesResponse = await client.images.createVariation({ + * image: fs.createReadStream('otter.png'), + * }); + * ``` */ createVariation( body: ImageCreateVariationParams, options?: Core.RequestOptions, ): Core.APIPromise<ImagesResponse> { + } /** - * Creates an edited or extended image given an original image and a prompt. + * Creates an edited or extended image given one or more source images and a + * prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`. + * + * @example + * ```ts + * const imagesResponse = await client.images.edit({ + * image: fs.createReadStream('path/to/file'), + * prompt: 'A cute baby sea otter wearing a beret', + * }); + * ``` */ edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse> { return this._client.post('/images/edits', Core.multipartFormRequestOptions({ body, ...options })); } /** * Creates an image given a prompt. + * [Learn more](https://platform.openai.com/docs/guides/images). + * + * @example + * ```ts + * const imagesResponse = await client.images.generate({ + * prompt: 'A cute baby sea otter', + * }); + * ``` */ generate(body: ImageGenerateParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse> { return this._client.post('/images/generations', { body, ...options }); } } /** - * Represents the url or the content of an image generated by the OpenAI API. + * Represents the content or the URL of an image generated by the OpenAI API. */ export interface Image { /** - * The base64-encoded JSON of the generated image, if `response_format` is - * `b64_json`. + * The base64-encoded JSON of the generated image.
Default value for `gpt-image-1`, + * and only present if `response_format` is set to `b64_json` for `dall-e-2` and + * `dall-e-3`. */ b64_json?: string; /** - * The prompt that was used to generate the image, if there was any revision to the - * prompt. + * For `dall-e-3` only, the revised prompt that was used to generate the image. */ revised_prompt?: string; /** - * The URL of the generated image, if `response_format` is `url` (default). + * When using `dall-e-2` or `dall-e-3`, the URL of the generated image if + * `response_format` is set to `url` (default value). Unsupported for + * `gpt-image-1`. */ url?: string; } -export type ImageModel = 'dall-e-2' | 'dall-e-3'; +export type ImageModel = 'dall-e-2' | 'dall-e-3' | 'gpt-image-1'; +/** + * The response from the image generation endpoint. + */ export interface ImagesResponse { + /** + * The Unix timestamp (in seconds) of when the image was created. + */ created: number; - data: Array<Image>; + /** + * The list of generated images. + */ + data?: Array<Image>; + + /** + * For `gpt-image-1` only, the token usage information for the image generation. + */ + usage?: ImagesResponse.Usage; +} + +export namespace ImagesResponse { + /** + * For `gpt-image-1` only, the token usage information for the image generation. + */ + export interface Usage { + /** + * The number of tokens (images and text) in the input prompt. + */ + input_tokens: number; + + /** + * The input tokens detailed information for the image generation. + */ + input_tokens_details: Usage.InputTokensDetails; + + /** + * The number of image tokens in the output image. + */ + output_tokens: number; + + /** + * The total number of tokens (images and text) used for the image generation. + */ + total_tokens: number; + } + + export namespace Usage { + /** + * The input tokens detailed information for the image generation. + */ + export interface InputTokensDetails { + /** + * The number of image tokens in the input prompt. + */ + image_tokens: number; + + /** + * The number of text tokens in the input prompt. + */ + text_tokens: number; + } + } } export interface ImageCreateVariationParams { /** @@ -74,8 +157,7 @@ model?: (string & {}) | ImageModel | null; /** - * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only - * `n=1` is supported. + * The number of images to generate. Must be between 1 and 10. */ n?: number | null; @@ -95,34 +177,52 @@ /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } export interface ImageEditParams { /** - * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask - * is not provided, image must have transparency, which will be used as the mask. + * The image(s) to edit. Must be a supported image file or an array of images. + * + * For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + * 25MB. You can provide up to 16 images. + * + * For `dall-e-2`, you can only provide one image, and it should be a square `png` + * file less than 4MB. */ - image: Core.Uploadable; + image: Core.Uploadable | Array<Core.Uploadable>; /** * A text description of the desired image(s). The maximum length is 1000 - * characters.
+ * characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. */ prompt: string; + /** + * Allows to set transparency for the background of the generated image(s). This + * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + * `opaque` or `auto` (default value). When `auto` is used, the model will + * automatically determine the best background for the image. + * + * If `transparent`, the output format needs to support transparency, so it should + * be set to either `png` (default value) or `webp`. + */ + background?: 'transparent' | 'opaque' | 'auto' | null; + /** * An additional image whose fully transparent areas (e.g. where alpha is zero) - * indicate where `image` should be edited. Must be a valid PNG file, less than + * indicate where `image` should be edited. If there are multiple images provided, + * the mask will be applied on the first image. Must be a valid PNG file, less than * 4MB, and have the same dimensions as `image`. */ mask?: Core.Uploadable; /** - * The model to use for image generation. Only `dall-e-2` is supported at this - * time. + * The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + * supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + * is used. */ model?: (string & {}) | ImageModel | null; @@ -131,39 +231,68 @@ export interface ImageEditParams { */ n?: number | null; + /** + * The quality of the image that will be generated. `high`, `medium` and `low` are + * only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + * Defaults to `auto`. + */ + quality?: 'standard' | 'low' | 'medium' | 'high' | 'auto' | null; + /** * The format in which the generated images are returned. Must be one of `url` or * `b64_json`. URLs are only valid for 60 minutes after the image has been - * generated. + * generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + * will always return base64-encoded images. */ response_format?: 'url' | 'b64_json' | null; /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or - * `1024x1024`. + * The size of the generated images. Must be one of `1024x1024`, `1536x1024` + * (landscape), `1024x1536` (portrait), or `auto` (default value) for + * `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. */ - size?: '256x256' | '512x512' | '1024x1024' | null; + size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto' | null; /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } export interface ImageGenerateParams { /** - * A text description of the desired image(s). The maximum length is 1000 - * characters for `dall-e-2` and 4000 characters for `dall-e-3`. + * A text description of the desired image(s). The maximum length is 32000 + * characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + * for `dall-e-3`. */ prompt: string; /** - * The model to use for image generation. + * Allows to set transparency for the background of the generated image(s). This + * parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + * `opaque` or `auto` (default value). When `auto` is used, the model will + * automatically determine the best background for the image. 
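+ *
+ * As a minimal sketch (assuming `client` is an already-constructed OpenAI
+ * client; the prompt and option values are illustrative only):
+ *
+ * ```ts
+ * const rsp = await client.images.generate({
+ *   model: 'gpt-image-1',
+ *   prompt: 'A cute baby sea otter',
+ *   background: 'transparent',
+ *   output_format: 'png',
+ * });
+ * ```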
+ * + * If `transparent`, the output format needs to support transparency, so it should + * be set to either `png` (default value) or `webp`. + */ + background?: 'transparent' | 'opaque' | 'auto' | null; + + /** + * The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + * `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + * `gpt-image-1` is used. */ model?: (string & {}) | ImageModel | null; + /** + * Control the content-moderation level for images generated by `gpt-image-1`. Must + * be either `low` for less restrictive filtering or `auto` (default value). + */ + moderation?: 'low' | 'auto' | null; + /** * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only * `n=1` is supported. @@ -171,47 +300,77 @@ export interface ImageGenerateParams { n?: number | null; /** - * The quality of the image that will be generated. `hd` creates images with finer - * details and greater consistency across the image. This param is only supported - * for `dall-e-3`. + * The compression level (0-100%) for the generated images. This parameter is only + * supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + * defaults to 100. */ - quality?: 'standard' | 'hd'; + output_compression?: number | null; /** - * The format in which the generated images are returned. Must be one of `url` or - * `b64_json`. URLs are only valid for 60 minutes after the image has been - * generated. + * The format in which the generated images are returned. This parameter is only + * supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + */ + output_format?: 'png' | 'jpeg' | 'webp' | null; + + /** + * The quality of the image that will be generated. + * + * - `auto` (default value) will automatically select the best quality for the + * given model. + * - `high`, `medium` and `low` are supported for `gpt-image-1`. + * - `hd` and `standard` are supported for `dall-e-3`. + * - `standard` is the only option for `dall-e-2`. + */ + quality?: 'standard' | 'hd' | 'low' | 'medium' | 'high' | 'auto' | null; + + /** + * The format in which generated images with `dall-e-2` and `dall-e-3` are + * returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + * after the image has been generated. This parameter isn't supported for + * `gpt-image-1` which will always return base64-encoded images. */ response_format?: 'url' | 'b64_json' | null; /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or - * `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or - * `1024x1792` for `dall-e-3` models. + * The size of the generated images. Must be one of `1024x1024`, `1536x1024` + * (landscape), `1024x1536` (portrait), or `auto` (default value) for + * `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + * one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. */ - size?: '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792' | null; + size?: + | 'auto' + | '1024x1024' + | '1536x1024' + | '1024x1536' + | '256x256' + | '512x512' + | '1792x1024' + | '1024x1792' + | null; /** - * The style of the generated images. Must be one of `vivid` or `natural`. Vivid - * causes the model to lean towards generating hyper-real and dramatic images. - * Natural causes the model to produce more natural, less hyper-real looking - * images. This param is only supported for `dall-e-3`. + * The style of the generated images. 
This parameter is only supported for + * `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + * towards generating hyper-real and dramatic images. Natural causes the model to + * produce more natural, less hyper-real looking images. */ style?: 'vivid' | 'natural' | null; /** * A unique identifier representing your end-user, which can help OpenAI to monitor * and detect abuse. - * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; } -export namespace Images { - export import Image = ImagesAPI.Image; - export import ImageModel = ImagesAPI.ImageModel; - export import ImagesResponse = ImagesAPI.ImagesResponse; - export import ImageCreateVariationParams = ImagesAPI.ImageCreateVariationParams; - export import ImageEditParams = ImagesAPI.ImageEditParams; - export import ImageGenerateParams = ImagesAPI.ImageGenerateParams; +export declare namespace Images { + export { + type Image as Image, + type ImageModel as ImageModel, + type ImagesResponse as ImagesResponse, + type ImageCreateVariationParams as ImageCreateVariationParams, + type ImageEditParams as ImageEditParams, + type ImageGenerateParams as ImageGenerateParams, + }; } diff --git a/src/resources/index.ts b/src/resources/index.ts index 15c5db77f..9d827615c 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -2,62 +2,97 @@ export * from './chat/index'; export * from './shared'; -export { AudioModel, AudioResponseFormat, Audio } from './audio/audio'; +export { Audio, type AudioModel, type AudioResponseFormat } from './audio/audio'; export { - Batch, - BatchError, - BatchRequestCounts, - BatchCreateParams, - BatchListParams, BatchesPage, Batches, + type Batch, + type BatchError, + type BatchRequestCounts, + type BatchCreateParams, + type BatchListParams, } from './batches'; export { Beta } from './beta/beta'; export { - Completion, - CompletionChoice, - CompletionUsage, - CompletionCreateParams, - CompletionCreateParamsNonStreaming, - CompletionCreateParamsStreaming, Completions, + type Completion, + type CompletionChoice, + type CompletionUsage, + type CompletionCreateParams, + type CompletionCreateParamsNonStreaming, + type CompletionCreateParamsStreaming, } from './completions'; export { - CreateEmbeddingResponse, - Embedding, - EmbeddingModel, - EmbeddingCreateParams, Embeddings, + type CreateEmbeddingResponse, + type Embedding, + type EmbeddingModel, + type EmbeddingCreateParams, } from './embeddings'; export { - FileContent, - FileDeleted, - FileObject, - FilePurpose, - FileCreateParams, - FileListParams, + EvalListResponsesPage, + Evals, + type EvalCustomDataSourceConfig, + type EvalStoredCompletionsDataSourceConfig, + type EvalCreateResponse, + type EvalRetrieveResponse, + type EvalUpdateResponse, + type EvalListResponse, + type EvalDeleteResponse, + type EvalCreateParams, + type EvalUpdateParams, + type EvalListParams, +} from './evals/evals'; +export { FileObjectsPage, Files, + type FileContent, + type FileDeleted, + type FileObject, + type FilePurpose, + type FileCreateParams, + type FileListParams, } from './files'; export { FineTuning } from './fine-tuning/fine-tuning'; +export { Graders } from './graders/graders'; export { - Image, - ImageModel, - ImagesResponse, - ImageCreateVariationParams, - ImageEditParams, - ImageGenerateParams, Images, + type Image, + type ImageModel, + type ImagesResponse, + type ImageCreateVariationParams, + type 
ImageEditParams, + type ImageGenerateParams, } from './images'; -export { Model, ModelDeleted, ModelsPage, Models } from './models'; -export { - Moderation, - ModerationImageURLInput, - ModerationModel, - ModerationMultiModalInput, - ModerationTextInput, - ModerationCreateResponse, - ModerationCreateParams, +export { ModelsPage, Models, type Model, type ModelDeleted } from './models'; +export { Moderations, + type Moderation, + type ModerationImageURLInput, + type ModerationModel, + type ModerationMultiModalInput, + type ModerationTextInput, + type ModerationCreateResponse, + type ModerationCreateParams, } from './moderations'; -export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads/uploads'; +export { Responses } from './responses/responses'; +export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads/uploads'; +export { + VectorStoresPage, + VectorStoreSearchResponsesPage, + VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreSearchResponse, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, + type VectorStoreSearchParams, +} from './vector-stores/vector-stores'; diff --git a/src/resources/models.ts b/src/resources/models.ts index 178915747..6d8cd5296 100644 --- a/src/resources/models.ts +++ b/src/resources/models.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ModelsAPI from './models'; import { Page } from '../pagination'; export class Models extends APIResource { @@ -69,8 +68,8 @@ export interface ModelDeleted { object: string; } -export namespace Models { - export import Model = ModelsAPI.Model; - export import ModelDeleted = ModelsAPI.ModelDeleted; - export import ModelsPage = ModelsAPI.ModelsPage; +Models.ModelsPage = ModelsPage; + +export declare namespace Models { + export { type Model as Model, type ModelDeleted as ModelDeleted, ModelsPage as ModelsPage }; } diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts index ba800509e..86e90376d 100644 --- a/src/resources/moderations.ts +++ b/src/resources/moderations.ts @@ -2,7 +2,6 @@ import { APIResource } from '../resource'; import * as Core from '../core'; -import * as ModerationsAPI from './moderations'; export class Moderations extends APIResource { /** @@ -76,14 +75,14 @@ export namespace Moderation { * execution of wrongdoing, or that gives advice or instruction on how to commit * illicit acts. For example, "how to shoplift" would fit this category. */ - illicit: boolean; + illicit: boolean | null; /** * Content that includes instructions or advice that facilitate the planning or * execution of wrongdoing that also includes violence, or that gives advice or * instruction on the procurement of any weapon. */ - 'illicit/violent': boolean; + 'illicit/violent': boolean | null; /** * Content that promotes, encourages, or depicts acts of self-harm, such as @@ -352,17 +351,19 @@ export interface ModerationCreateParams { * The content moderation model you would like to use. Learn more in * [the moderation guide](https://platform.openai.com/docs/guides/moderation), and * learn about available models - * [here](https://platform.openai.com/docs/models/moderation). 
+ * [here](https://platform.openai.com/docs/models#moderation). */ model?: (string & {}) | ModerationModel; } -export namespace Moderations { - export import Moderation = ModerationsAPI.Moderation; - export import ModerationImageURLInput = ModerationsAPI.ModerationImageURLInput; - export import ModerationModel = ModerationsAPI.ModerationModel; - export import ModerationMultiModalInput = ModerationsAPI.ModerationMultiModalInput; - export import ModerationTextInput = ModerationsAPI.ModerationTextInput; - export import ModerationCreateResponse = ModerationsAPI.ModerationCreateResponse; - export import ModerationCreateParams = ModerationsAPI.ModerationCreateParams; +export declare namespace Moderations { + export { + type Moderation as Moderation, + type ModerationImageURLInput as ModerationImageURLInput, + type ModerationModel as ModerationModel, + type ModerationMultiModalInput as ModerationMultiModalInput, + type ModerationTextInput as ModerationTextInput, + type ModerationCreateResponse as ModerationCreateResponse, + type ModerationCreateParams as ModerationCreateParams, + }; } diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts new file mode 100644 index 000000000..ad3f9a386 --- /dev/null +++ b/src/resources/responses/index.ts @@ -0,0 +1,4 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { InputItems, type ResponseItemList, type InputItemListParams } from './input-items'; +export { Responses } from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts new file mode 100644 index 000000000..74707f184 --- /dev/null +++ b/src/resources/responses/input-items.ts @@ -0,0 +1,103 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as ResponsesAPI from './responses'; +import { ResponseItemsPage } from './responses'; +import { type CursorPageParams } from '../../pagination'; + +export class InputItems extends APIResource { + /** + * Returns a list of input items for a given response. + * + * @example + * ```ts + * // Automatically fetches more pages as needed. + * for await (const responseItem of client.responses.inputItems.list( + * 'response_id', + * )) { + * // ... + * } + * ``` + */ + list( + responseId: string, + query?: InputItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + responseId: string, + options?: Core.RequestOptions, + ): Core.PagePromise; + list( + responseId: string, + query: InputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise { + if (isRequestOptions(query)) { + return this.list(responseId, {}, query); + } + return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemsPage, { + query, + ...options, + }); + } +} + +/** + * A list of Response items. + */ +export interface ResponseItemList { + /** + * A list of items used to generate this response. + */ + data: Array; + + /** + * The ID of the first item in the list. + */ + first_id: string; + + /** + * Whether there are more items available. + */ + has_more: boolean; + + /** + * The ID of the last item in the list. + */ + last_id: string; + + /** + * The type of object returned, must be `list`. 
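+ *
+ * A hedged example of the list envelope (the IDs are hypothetical and `data` is
+ * left empty for brevity):
+ *
+ * ```ts
+ * const page: ResponseItemList = {
+ *   object: 'list',
+ *   data: [],
+ *   first_id: 'msg_abc123',
+ *   last_id: 'msg_abc456',
+ *   has_more: false,
+ * };
+ * ```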
+ */
+ object: 'list';
+}
+
+export interface InputItemListParams extends CursorPageParams {
+ /**
+ * An item ID to list items before, used in pagination.
+ */
+ before?: string;
+
+ /**
+ * Additional fields to include in the response. See the `include` parameter for
+ * Response creation above for more information.
+ */
+ include?: Array<ResponsesAPI.ResponseIncludable>;
+
+ /**
+ * The order to return the input items in. Default is `asc`.
+ *
+ * - `asc`: Return the input items in ascending order.
+ * - `desc`: Return the input items in descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export declare namespace InputItems {
+ export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams };
+}
+
+export { ResponseItemsPage };
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
new file mode 100644
index 000000000..1440e865e
--- /dev/null
+++ b/src/resources/responses/responses.ts
@@ -0,0 +1,3203 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import {
+ type ExtractParsedContentFromParams,
+ parseResponse,
+ type ResponseCreateParamsWithTools,
+ addOutputText,
+} from '../../lib/ResponsesParser';
+import * as Core from '../../core';
+import { APIPromise, isRequestOptions } from '../../core';
+import { APIResource } from '../../resource';
+import * as Shared from '../shared';
+import * as InputItemsAPI from './input-items';
+import { InputItemListParams, InputItems, ResponseItemList } from './input-items';
+import * as ResponsesAPI from './responses';
+import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream';
+import { CursorPage } from '../../pagination';
+import { Stream } from '../../streaming';
+
+export interface ParsedResponseOutputText<ParsedT> extends ResponseOutputText {
+ parsed: ParsedT | null;
+}
+
+export type ParsedContent<ParsedT> = ParsedResponseOutputText<ParsedT> | ResponseOutputRefusal;
+
+export interface ParsedResponseOutputMessage<ParsedT> extends ResponseOutputMessage {
+ content: ParsedContent<ParsedT>[];
+}
+
+export interface ParsedResponseFunctionToolCall extends ResponseFunctionToolCall {
+ parsed_arguments: any;
+}
+
+export type ParsedResponseOutputItem<ParsedT> =
+ | ParsedResponseOutputMessage<ParsedT>
+ | ParsedResponseFunctionToolCall
+ | ResponseFileSearchToolCall
+ | ResponseFunctionWebSearch
+ | ResponseComputerToolCall
+ | ResponseReasoningItem;
+
+export interface ParsedResponse<ParsedT> extends Response {
+ output: Array<ParsedResponseOutputItem<ParsedT>>;
+
+ output_parsed: ParsedT | null;
+}
+
+export type ResponseParseParams = ResponseCreateParamsNonStreaming;
+
+export class Responses extends APIResource {
+ inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);
+
+ /**
+ * Creates a model response. Provide
+ * [text](https://platform.openai.com/docs/guides/text) or
+ * [image](https://platform.openai.com/docs/guides/images) inputs to generate
+ * [text](https://platform.openai.com/docs/guides/text) or
+ * [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+ * the model call your own
+ * [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+ * built-in [tools](https://platform.openai.com/docs/guides/tools) like
+ * [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+ * your own data as input for the model's response.
+ *
+ * @example
+ * ```ts
+ * const response = await client.responses.create({
+ *   input: 'string',
+ *   model: 'gpt-4o',
+ * });
+ * ```
+ */
+ create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Response>;
+ create(
+ body: ResponseCreateParamsStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise<Stream<ResponseStreamEvent>>;
+ create(
+ body: ResponseCreateParamsBase,
+ options?: Core.RequestOptions,
+ ): APIPromise<Stream<ResponseStreamEvent> | Response>;
+ create(
+ body: ResponseCreateParams,
+ options?: Core.RequestOptions,
+ ): APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>> {
+ return (
+ this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as
+ | APIPromise<Response>
+ | APIPromise<Stream<ResponseStreamEvent>>
+ )._thenUnwrap((rsp) => {
+ if ('object' in rsp && rsp.object === 'response') {
+ addOutputText(rsp as Response);
+ }
+
+ return rsp;
+ }) as APIPromise<Response> | APIPromise<Stream<ResponseStreamEvent>>;
+ }
+
+ /**
+ * Retrieves a model response with the given ID.
+ *
+ * @example
+ * ```ts
+ * const response = await client.responses.retrieve(
+ *   'resp_677efb5139a88190b512bc3fef8e535d',
+ * );
+ * ```
+ */
+ retrieve(
+ responseId: string,
+ query?: ResponseRetrieveParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<Response>;
+ retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise<Response>;
+ retrieve(
+ responseId: string,
+ query: ResponseRetrieveParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<Response> {
+ if (isRequestOptions(query)) {
+ return this.retrieve(responseId, {}, query);
+ }
+ return this._client.get(`/responses/${responseId}`, { query, ...options });
+ }
+
+ /**
+ * Deletes a model response with the given ID.
+ *
+ * @example
+ * ```ts
+ * await client.responses.del(
+ *   'resp_677efb5139a88190b512bc3fef8e535d',
+ * );
+ * ```
+ */
+ del(responseId: string, options?: Core.RequestOptions): Core.APIPromise<void> {
+ return this._client.delete(`/responses/${responseId}`, {
+ ...options,
+ headers: { Accept: '*/*', ...options?.headers },
+ });
+ }
+
+ parse<Params extends ResponseCreateParamsWithTools, ParsedT = ExtractParsedContentFromParams<Params>>(
+ body: Params,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<ParsedResponse<ParsedT>> {
+ return this._client.responses
+ .create(body, options)
+ ._thenUnwrap((response) => parseResponse(response as Response, body));
+ }
+
+ /**
+ * Creates a model response stream.
+ */
+ stream<Params extends ResponseStreamParams, ParsedT = ExtractParsedContentFromParams<Params>>(
+ body: Params,
+ options?: Core.RequestOptions,
+ ): ResponseStream<ParsedT> {
+ return ResponseStream.createResponse(this._client, body, options);
+ }
+}
+
+export class ResponseItemsPage extends CursorPage<ResponseItem> {}
+
+/**
+ * A tool that controls a virtual computer. Learn more about the
+ * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
+ */
+export interface ComputerTool {
+ /**
+ * The height of the computer display.
+ */
+ display_height: number;
+
+ /**
+ * The width of the computer display.
+ */
+ display_width: number;
+
+ /**
+ * The type of computer environment to control.
+ */
+ environment: 'windows' | 'mac' | 'linux' | 'ubuntu' | 'browser';
+
+ /**
+ * The type of the computer use tool. Always `computer_use_preview`.
+ */
+ type: 'computer-preview';
+}
+
+/**
+ * A message input to the model with a role indicating instruction following
+ * hierarchy. Instructions given with the `developer` or `system` role take
+ * precedence over instructions given with the `user` role. Messages with the
+ * `assistant` role are presumed to have been generated by the model in previous
+ * interactions.
+ */
+export interface EasyInputMessage {
+ /**
+ * Text, image, or audio input to the model, used to generate a response. Can also
+ * contain previous assistant responses.
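+ *
+ * A minimal sketch of the two accepted shapes, plain text or a content list
+ * (the literal values are illustrative, and `input_text` is the text
+ * content-part type):
+ *
+ * ```ts
+ * const plain: EasyInputMessage = { role: 'user', content: 'Hello!' };
+ *
+ * const multi: EasyInputMessage = {
+ *   role: 'user',
+ *   content: [{ type: 'input_text', text: 'Describe this picture.' }],
+ * };
+ * ```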
+ */ + content: string | ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; +} + +/** + * A tool that searches for relevant content from uploaded files. Learn more about + * the + * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + */ +export interface FileSearchTool { + /** + * The type of the file search tool. Always `file_search`. + */ + type: 'file_search'; + + /** + * The IDs of the vector stores to search. + */ + vector_store_ids: Array; + + /** + * A filter to apply. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter | null; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. + */ + max_num_results?: number; + + /** + * Ranking options for search. + */ + ranking_options?: FileSearchTool.RankingOptions; +} + +export namespace FileSearchTool { + /** + * Ranking options for search. + */ + export interface RankingOptions { + /** + * The ranker to use for the file search. + */ + ranker?: 'auto' | 'default-2024-11-15'; + + /** + * The score threshold for the file search, a number between 0 and 1. Numbers + * closer to 1 will attempt to return only the most relevant results, but may + * return fewer results. + */ + score_threshold?: number; + } +} + +/** + * Defines a function in your own code the model can choose to call. Learn more + * about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ +export interface FunctionTool { + /** + * The name of the function to call. + */ + name: string; + + /** + * A JSON schema object describing the parameters of the function. + */ + parameters: Record | null; + + /** + * Whether to enforce strict parameter validation. Default `true`. + */ + strict: boolean | null; + + /** + * The type of the function tool. Always `function`. + */ + type: 'function'; + + /** + * A description of the function. Used by the model to determine whether or not to + * call the function. + */ + description?: string | null; +} + +export interface Response { + /** + * Unique identifier for this Response. + */ + id: string; + + /** + * Unix timestamp (in seconds) of when this Response was created. + */ + created_at: number; + + output_text: string; + + /** + * An error object returned when the model fails to generate a Response. + */ + error: ResponseError | null; + + /** + * Details about why the response is incomplete. + */ + incomplete_details: Response.IncompleteDetails | null; + + /** + * Inserts a system (or developer) message as the first item in the model's + * context. + * + * When using along with `previous_response_id`, the instructions from a previous + * response will not be carried over to the next response. This makes it simple to + * swap out system (or developer) messages in new responses. + */ + instructions: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * Model ID used to generate the response, like `gpt-4o` or `o3`. 
OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. + */ + model: Shared.ResponsesModel; + + /** + * The object type of this resource - always set to `response`. + */ + object: 'response'; + + /** + * An array of content items generated by the model. + * + * - The length and order of items in the `output` array is dependent on the + * model's response. + * - Rather than accessing the first item in the `output` array and assuming it's + * an `assistant` message with the content generated by the model, you might + * consider using the `output_text` property where supported in SDKs. + */ + output: Array; + + /** + * Whether to allow the model to run tool calls in parallel. + */ + parallel_tool_calls: boolean; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature: number | null; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p: number | null; + + /** + * An upper bound for the number of tokens that can be generated for a response, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_output_tokens?: number | null; + + /** + * The unique ID of the previous response to the model. Use this to create + * multi-turn conversations. Learn more about + * [conversation state](https://platform.openai.com/docs/guides/conversation-state). + */ + previous_response_id?: string | null; + + /** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ + reasoning?: Shared.Reasoning | null; + + /** + * Specifies the latency tier to use for processing the request. 
This parameter is
+ * relevant for customers subscribed to the scale tier service:
+ *
+ * - If set to 'auto', and the Project is Scale tier enabled, the system will
+ *   utilize scale tier credits until they are exhausted.
+ * - If set to 'auto', and the Project is not Scale tier enabled, the request will
+ *   be processed using the default service tier with a lower uptime SLA and no
+ *   latency guarantee.
+ * - If set to 'default', the request will be processed using the default service
+ *   tier with a lower uptime SLA and no latency guarantee.
+ * - If set to 'flex', the request will be processed with the Flex Processing
+ *   service tier.
+ *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ * - When not set, the default behavior is 'auto'.
+ *
+ * When this parameter is set, the response body will include the `service_tier`
+ * utilized.
+ */
+ service_tier?: 'auto' | 'default' | 'flex' | null;
+
+ /**
+ * The status of the response generation. One of `completed`, `failed`,
+ * `in_progress`, or `incomplete`.
+ */
+ status?: ResponseStatus;
+
+ /**
+ * Configuration options for a text response from the model. Can be plain text or
+ * structured JSON data. Learn more:
+ *
+ * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ */
+ text?: ResponseTextConfig;
+
+ /**
+ * The truncation strategy to use for the model response.
+ *
+ * - `auto`: If the context of this response and previous ones exceeds the model's
+ *   context window size, the model will truncate the response to fit the context
+ *   window by dropping input items in the middle of the conversation.
+ * - `disabled` (default): If a model response will exceed the context window size
+ *   for a model, the request will fail with a 400 error.
+ */
+ truncation?: 'auto' | 'disabled' | null;
+
+ /**
+ * Represents token usage details including input tokens, output tokens, a
+ * breakdown of output tokens, and the total tokens used.
+ */
+ usage?: ResponseUsage;
+
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to monitor
+ * and detect abuse.
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ */
+ user?: string;
+}
+
+export namespace Response {
+ /**
+ * Details about why the response is incomplete.
+ */
+ export interface IncompleteDetails {
+ /**
+ * The reason why the response is incomplete.
+ */
+ reason?: 'max_output_tokens' | 'content_filter';
+ }
+}
+
+/**
+ * Emitted when there is a partial audio response.
+ */
+export interface ResponseAudioDeltaEvent {
+ /**
+ * A chunk of Base64 encoded response audio bytes.
+ */
+ delta: string;
+
+ /**
+ * The type of the event. Always `response.audio.delta`.
+ */
+ type: 'response.audio.delta';
+}
+
+/**
+ * Emitted when the audio response is complete.
+ */
+export interface ResponseAudioDoneEvent {
+ /**
+ * The type of the event. Always `response.audio.done`.
+ */
+ type: 'response.audio.done';
+}
+
+/**
+ * Emitted when there is a partial transcript of audio.
+ */
+export interface ResponseAudioTranscriptDeltaEvent {
+ /**
+ * The partial transcript of the audio response.
+ */
+ delta: string;
+
+ /**
+ * The type of the event. Always `response.audio.transcript.delta`.
+ */
+ type: 'response.audio.transcript.delta';
+}
+
+/**
+ * Emitted when the full audio transcript is completed.
+ */
+export interface ResponseAudioTranscriptDoneEvent {
+ /**
+ * The type of the event.
Always `response.audio.transcript.done`. + */ + type: 'response.audio.transcript.done'; +} + +/** + * Emitted when a partial code snippet is added by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDeltaEvent { + /** + * The partial code snippet added by the code interpreter. + */ + delta: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.delta`. + */ + type: 'response.code_interpreter_call.code.delta'; +} + +/** + * Emitted when code snippet output is finalized by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDoneEvent { + /** + * The final code snippet output by the code interpreter. + */ + code: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.done`. + */ + type: 'response.code_interpreter_call.code.done'; +} + +/** + * Emitted when the code interpreter call is completed. + */ +export interface ResponseCodeInterpreterCallCompletedEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.completed`. + */ + type: 'response.code_interpreter_call.completed'; +} + +/** + * Emitted when a code interpreter call is in progress. + */ +export interface ResponseCodeInterpreterCallInProgressEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.in_progress`. + */ + type: 'response.code_interpreter_call.in_progress'; +} + +/** + * Emitted when the code interpreter is actively interpreting the code snippet. + */ +export interface ResponseCodeInterpreterCallInterpretingEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.interpreting`. + */ + type: 'response.code_interpreter_call.interpreting'; +} + +/** + * A tool call to run code. + */ +export interface ResponseCodeInterpreterToolCall { + /** + * The unique ID of the code interpreter tool call. + */ + id: string; + + /** + * The code to run. + */ + code: string; + + /** + * The results of the code interpreter tool call. + */ + results: Array; + + /** + * The status of the code interpreter tool call. + */ + status: 'in_progress' | 'interpreting' | 'completed'; + + /** + * The type of the code interpreter tool call. Always `code_interpreter_call`. + */ + type: 'code_interpreter_call'; +} + +export namespace ResponseCodeInterpreterToolCall { + /** + * The output of a code interpreter tool call that is text. + */ + export interface Logs { + /** + * The logs of the code interpreter tool call. + */ + logs: string; + + /** + * The type of the code interpreter text output. Always `logs`. + */ + type: 'logs'; + } + + /** + * The output of a code interpreter tool call that is a file. 
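+ *
+ * An illustrative value (the file ID and MIME type are hypothetical):
+ *
+ * ```ts
+ * const output: ResponseCodeInterpreterToolCall.Files = {
+ *   type: 'files',
+ *   files: [{ file_id: 'file-abc123', mime_type: 'image/png' }],
+ * };
+ * ```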
+ */ + export interface Files { + files: Array; + + /** + * The type of the code interpreter file output. Always `files`. + */ + type: 'files'; + } + + export namespace Files { + export interface File { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The MIME type of the file. + */ + mime_type: string; + } + } +} + +/** + * Emitted when the model response is complete. + */ +export interface ResponseCompletedEvent { + /** + * Properties of the completed response. + */ + response: Response; + + /** + * The type of the event. Always `response.completed`. + */ + type: 'response.completed'; +} + +/** + * A tool call to a computer use tool. See the + * [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + * for more information. + */ +export interface ResponseComputerToolCall { + /** + * The unique ID of the computer call. + */ + id: string; + + /** + * A click action. + */ + action: + | ResponseComputerToolCall.Click + | ResponseComputerToolCall.DoubleClick + | ResponseComputerToolCall.Drag + | ResponseComputerToolCall.Keypress + | ResponseComputerToolCall.Move + | ResponseComputerToolCall.Screenshot + | ResponseComputerToolCall.Scroll + | ResponseComputerToolCall.Type + | ResponseComputerToolCall.Wait; + + /** + * An identifier used when responding to the tool call with output. + */ + call_id: string; + + /** + * The pending safety checks for the computer call. + */ + pending_safety_checks: Array; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the computer call. Always `computer_call`. + */ + type: 'computer_call'; +} + +export namespace ResponseComputerToolCall { + /** + * A click action. + */ + export interface Click { + /** + * Indicates which mouse button was pressed during the click. One of `left`, + * `right`, `wheel`, `back`, or `forward`. + */ + button: 'left' | 'right' | 'wheel' | 'back' | 'forward'; + + /** + * Specifies the event type. For a click action, this property is always set to + * `click`. + */ + type: 'click'; + + /** + * The x-coordinate where the click occurred. + */ + x: number; + + /** + * The y-coordinate where the click occurred. + */ + y: number; + } + + /** + * A double click action. + */ + export interface DoubleClick { + /** + * Specifies the event type. For a double click action, this property is always set + * to `double_click`. + */ + type: 'double_click'; + + /** + * The x-coordinate where the double click occurred. + */ + x: number; + + /** + * The y-coordinate where the double click occurred. + */ + y: number; + } + + /** + * A drag action. + */ + export interface Drag { + /** + * An array of coordinates representing the path of the drag action. Coordinates + * will appear as an array of objects, eg + * + * ``` + * [ + * { x: 100, y: 200 }, + * { x: 200, y: 300 } + * ] + * ``` + */ + path: Array; + + /** + * Specifies the event type. For a drag action, this property is always set to + * `drag`. + */ + type: 'drag'; + } + + export namespace Drag { + /** + * A series of x/y coordinate pairs in the drag path. + */ + export interface Path { + /** + * The x-coordinate. + */ + x: number; + + /** + * The y-coordinate. + */ + y: number; + } + } + + /** + * A collection of keypresses the model would like to perform. + */ + export interface Keypress { + /** + * The combination of keys the model is requesting to be pressed. 
This is an array + * of strings, each representing a key. + */ + keys: Array; + + /** + * Specifies the event type. For a keypress action, this property is always set to + * `keypress`. + */ + type: 'keypress'; + } + + /** + * A mouse move action. + */ + export interface Move { + /** + * Specifies the event type. For a move action, this property is always set to + * `move`. + */ + type: 'move'; + + /** + * The x-coordinate to move to. + */ + x: number; + + /** + * The y-coordinate to move to. + */ + y: number; + } + + /** + * A screenshot action. + */ + export interface Screenshot { + /** + * Specifies the event type. For a screenshot action, this property is always set + * to `screenshot`. + */ + type: 'screenshot'; + } + + /** + * A scroll action. + */ + export interface Scroll { + /** + * The horizontal scroll distance. + */ + scroll_x: number; + + /** + * The vertical scroll distance. + */ + scroll_y: number; + + /** + * Specifies the event type. For a scroll action, this property is always set to + * `scroll`. + */ + type: 'scroll'; + + /** + * The x-coordinate where the scroll occurred. + */ + x: number; + + /** + * The y-coordinate where the scroll occurred. + */ + y: number; + } + + /** + * An action to type in text. + */ + export interface Type { + /** + * The text to type. + */ + text: string; + + /** + * Specifies the event type. For a type action, this property is always set to + * `type`. + */ + type: 'type'; + } + + /** + * A wait action. + */ + export interface Wait { + /** + * Specifies the event type. For a wait action, this property is always set to + * `wait`. + */ + type: 'wait'; + } + + /** + * A pending safety check for the computer call. + */ + export interface PendingSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +export interface ResponseComputerToolCallOutputItem { + /** + * The unique ID of the computer call tool output. + */ + id: string; + + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ResponseComputerToolCallOutputScreenshot; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +export namespace ResponseComputerToolCallOutputItem { + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +/** + * A computer screenshot image used with the computer use tool. + */ +export interface ResponseComputerToolCallOutputScreenshot { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. 
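+ *
+ * A hedged example of a screenshot payload (the data URL is a truncated
+ * placeholder):
+ *
+ * ```ts
+ * const screenshot: ResponseComputerToolCallOutputScreenshot = {
+ *   type: 'computer_screenshot',
+ *   image_url: 'data:image/png;base64,iVBORw0KGgo...',
+ * };
+ * ```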
+ */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; +} + +/** + * Multi-modal input and output contents. + */ +export type ResponseContent = + | ResponseInputText + | ResponseInputImage + | ResponseInputFile + | ResponseOutputText + | ResponseOutputRefusal; + +/** + * Emitted when a new content part is added. + */ +export interface ResponseContentPartAddedEvent { + /** + * The index of the content part that was added. + */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that was added. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.added`. + */ + type: 'response.content_part.added'; +} + +/** + * Emitted when a content part is done. + */ +export interface ResponseContentPartDoneEvent { + /** + * The index of the content part that is done. + */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that is done. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.done`. + */ + type: 'response.content_part.done'; +} + +/** + * An event that is emitted when a response is created. + */ +export interface ResponseCreatedEvent { + /** + * The response that was created. + */ + response: Response; + + /** + * The type of the event. Always `response.created`. + */ + type: 'response.created'; +} + +/** + * An error object returned when the model fails to generate a Response. + */ +export interface ResponseError { + /** + * The error code for the response. + */ + code: + | 'server_error' + | 'rate_limit_exceeded' + | 'invalid_prompt' + | 'vector_store_timeout' + | 'invalid_image' + | 'invalid_image_format' + | 'invalid_base64_image' + | 'invalid_image_url' + | 'image_too_large' + | 'image_too_small' + | 'image_parse_error' + | 'image_content_policy_violation' + | 'invalid_image_mode' + | 'image_file_too_large' + | 'unsupported_image_media_type' + | 'empty_image_file' + | 'failed_to_download_image' + | 'image_file_not_found'; + + /** + * A human-readable description of the error. + */ + message: string; +} + +/** + * Emitted when an error occurs. + */ +export interface ResponseErrorEvent { + /** + * The error code. + */ + code: string | null; + + /** + * The error message. + */ + message: string; + + /** + * The error parameter. + */ + param: string | null; + + /** + * The type of the event. Always `error`. + */ + type: 'error'; +} + +/** + * An event that is emitted when a response fails. + */ +export interface ResponseFailedEvent { + /** + * The response that failed. + */ + response: Response; + + /** + * The type of the event. Always `response.failed`. + */ + type: 'response.failed'; +} + +/** + * Emitted when a file search call is completed (results found). + */ +export interface ResponseFileSearchCallCompletedEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is initiated. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.completed`. 
+ */ + type: 'response.file_search_call.completed'; +} + +/** + * Emitted when a file search call is initiated. + */ +export interface ResponseFileSearchCallInProgressEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is initiated. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.in_progress`. + */ + type: 'response.file_search_call.in_progress'; +} + +/** + * Emitted when a file search is currently searching. + */ +export interface ResponseFileSearchCallSearchingEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is searching. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.searching`. + */ + type: 'response.file_search_call.searching'; +} + +/** + * The results of a file search tool call. See the + * [file search guide](https://platform.openai.com/docs/guides/tools-file-search) + * for more information. + */ +export interface ResponseFileSearchToolCall { + /** + * The unique ID of the file search tool call. + */ + id: string; + + /** + * The queries used to search for files. + */ + queries: Array; + + /** + * The status of the file search tool call. One of `in_progress`, `searching`, + * `incomplete` or `failed`, + */ + status: 'in_progress' | 'searching' | 'completed' | 'incomplete' | 'failed'; + + /** + * The type of the file search tool call. Always `file_search_call`. + */ + type: 'file_search_call'; + + /** + * The results of the file search tool call. + */ + results?: Array | null; +} + +export namespace ResponseFileSearchToolCall { + export interface Result { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + + /** + * The unique ID of the file. + */ + file_id?: string; + + /** + * The name of the file. + */ + filename?: string; + + /** + * The relevance score of the file - a value between 0 and 1. + */ + score?: number; + + /** + * The text that was retrieved from the file. + */ + text?: string; + } +} + +/** + * An object specifying the format that the model must output. + * + * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + * ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * The default format is `{ "type": "text" }` with no additional options. + * + * **Not recommended for gpt-4o and newer models:** + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. + */ +export type ResponseFormatTextConfig = + | Shared.ResponseFormatText + | ResponseFormatTextJSONSchemaConfig + | Shared.ResponseFormatJSONObject; + +/** + * JSON Schema response format. Used to generate structured JSON responses. Learn + * more about + * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). 
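+ *
+ * A minimal sketch of such a config (the schema contents are illustrative):
+ *
+ * ```ts
+ * const format: ResponseFormatTextJSONSchemaConfig = {
+ *   type: 'json_schema',
+ *   name: 'weather_report',
+ *   schema: {
+ *     type: 'object',
+ *     properties: { city: { type: 'string' }, temp_c: { type: 'number' } },
+ *     required: ['city', 'temp_c'],
+ *     additionalProperties: false,
+ *   },
+ *   strict: true,
+ * };
+ * ```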
+ */ +export interface ResponseFormatTextJSONSchemaConfig { + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name: string; + + /** + * The schema for the response format, described as a JSON Schema object. Learn how + * to build JSON schemas [here](https://json-schema.org/). + */ + schema: Record; + + /** + * The type of response format being defined. Always `json_schema`. + */ + type: 'json_schema'; + + /** + * A description of what the response format is for, used by the model to determine + * how to respond in the format. + */ + description?: string; + + /** + * Whether to enable strict schema adherence when generating the output. If set to + * true, the model will always follow the exact schema defined in the `schema` + * field. Only a subset of JSON Schema is supported when `strict` is `true`. To + * learn more, read the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + */ + strict?: boolean | null; +} + +/** + * Emitted when there is a partial function-call arguments delta. + */ +export interface ResponseFunctionCallArgumentsDeltaEvent { + /** + * The function-call arguments delta that is added. + */ + delta: string; + + /** + * The ID of the output item that the function-call arguments delta is added to. + */ + item_id: string; + + /** + * The index of the output item that the function-call arguments delta is added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.function_call_arguments.delta`. + */ + type: 'response.function_call_arguments.delta'; +} + +/** + * Emitted when function-call arguments are finalized. + */ +export interface ResponseFunctionCallArgumentsDoneEvent { + /** + * The function-call arguments. + */ + arguments: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item. + */ + output_index: number; + + type: 'response.function_call_arguments.done'; +} + +/** + * A tool call to run a function. See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCall { + /** + * A JSON string of the arguments to pass to the function. + */ + arguments: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * The name of the function to run. + */ + name: string; + + /** + * The type of the function tool call. Always `function_call`. + */ + type: 'function_call'; + + /** + * The unique ID of the function tool call. + */ + id?: string; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +/** + * A tool call to run a function. See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { + /** + * The unique ID of the function tool call. + */ + id: string; +} + +export interface ResponseFunctionToolCallOutputItem { + /** + * The unique ID of the function call tool output. + */ + id: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. 
+ */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +/** + * The results of a web search tool call. See the + * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + * more information. + */ +export interface ResponseFunctionWebSearch { + /** + * The unique ID of the web search tool call. + */ + id: string; + + /** + * The status of the web search tool call. + */ + status: 'in_progress' | 'searching' | 'completed' | 'failed'; + + /** + * The type of the web search tool call. Always `web_search_call`. + */ + type: 'web_search_call'; +} + +/** + * Emitted when the response is in progress. + */ +export interface ResponseInProgressEvent { + /** + * The response that is in progress. + */ + response: Response; + + /** + * The type of the event. Always `response.in_progress`. + */ + type: 'response.in_progress'; +} + +/** + * Specify additional output data to include in the model response. Currently + * supported values are: + * + * - `file_search_call.results`: Include the search results of the file search tool + * call. + * - `message.input_image.image_url`: Include image urls from the input message. + * - `computer_call_output.output.image_url`: Include image urls from the computer + * call output. + * - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + * tokens in reasoning item outputs. This enables reasoning items to be used in + * multi-turn conversations when using the Responses API statelessly (like when + * the `store` parameter is set to `false`, or when an organization is enrolled + * in the zero data retention program). + */ +export type ResponseIncludable = + | 'file_search_call.results' + | 'message.input_image.image_url' + | 'computer_call_output.output.image_url' + | 'reasoning.encrypted_content'; + +/** + * An event that is emitted when a response finishes as incomplete. + */ +export interface ResponseIncompleteEvent { + /** + * The response that was incomplete. + */ + response: Response; + + /** + * The type of the event. Always `response.incomplete`. + */ + type: 'response.incomplete'; +} + +/** + * A list of one or many input items to the model, containing different content + * types. + */ +export type ResponseInput = Array; + +/** + * An audio input to the model. + */ +export interface ResponseInputAudio { + /** + * Base64-encoded audio data. + */ + data: string; + + /** + * The format of the audio data. Currently supported formats are `mp3` and `wav`. + */ + format: 'mp3' | 'wav'; + + /** + * The type of the input item. Always `input_audio`. + */ + type: 'input_audio'; +} + +/** + * A text input to the model. + */ +export type ResponseInputContent = ResponseInputText | ResponseInputImage | ResponseInputFile; + +/** + * A file input to the model. + */ +export interface ResponseInputFile { + /** + * The type of the input item. Always `input_file`. + */ + type: 'input_file'; + + /** + * The content of the file to be sent to the model. + */ + file_data?: string; + + /** + * The ID of the file to be sent to the model. + */ + file_id?: string | null; + + /** + * The name of the file to be sent to the model. + */ + filename?: string; +} + +/** + * An image input to the model. 
Learn about + * [image inputs](https://platform.openai.com/docs/guides/vision). + */ +export interface ResponseInputImage { + /** + * The detail level of the image to be sent to the model. One of `high`, `low`, or + * `auto`. Defaults to `auto`. + */ + detail: 'low' | 'high' | 'auto'; + + /** + * The type of the input item. Always `input_image`. + */ + type: 'input_image'; + + /** + * The ID of the file to be sent to the model. + */ + file_id?: string | null; + + /** + * The URL of the image to be sent to the model. A fully qualified URL or base64 + * encoded image in a data URL. + */ + image_url?: string | null; +} + +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ +export type ResponseInputItem = + | EasyInputMessage + | ResponseInputItem.Message + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseComputerToolCall + | ResponseInputItem.ComputerCallOutput + | ResponseFunctionWebSearch + | ResponseFunctionToolCall + | ResponseInputItem.FunctionCallOutput + | ResponseReasoningItem + | ResponseInputItem.ItemReference; + +export namespace ResponseInputItem { + /** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. + */ + export interface Message { + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponsesAPI.ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; + } + + /** + * The output of a computer tool call. + */ + export interface ComputerCallOutput { + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ResponsesAPI.ResponseComputerToolCallOutputScreenshot; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The ID of the computer tool call output. + */ + id?: string | null; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array | null; + + /** + * The status of the message input. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete' | null; + } + + export namespace ComputerCallOutput { + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code?: string | null; + + /** + * Details about the pending safety check. + */ + message?: string | null; + } + } + + /** + * The output of a function tool call. 
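A usage sketch for the input types above; the import path, image URL, and prompt are assumptions for illustration:

```ts
// Minimal sketch: a ResponseInput mixing input_text and input_image content.
import OpenAI from 'openai';
import type { ResponseInput } from 'openai/resources/responses/responses';

const input: ResponseInput = [
  {
    type: 'message',
    role: 'user',
    content: [
      { type: 'input_text', text: 'What is in this image?' },
      { type: 'input_image', detail: 'auto', image_url: 'https://example.com/photo.png' },
    ],
  },
];

const client = new OpenAI();
const response = await client.responses.create({ model: 'gpt-4o', input });
```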
+ */ + export interface FunctionCallOutput { + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The unique ID of the function tool call output. Populated when this item is + * returned via API. + */ + id?: string | null; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete' | null; + } + + /** + * An internal identifier for an item to reference. + */ + export interface ItemReference { + /** + * The ID of the item to reference. + */ + id: string; + + /** + * The type of item to reference. Always `item_reference`. + */ + type?: 'item_reference' | null; + } +} + +/** + * A list of one or many input items to the model, containing different content + * types. + */ +export type ResponseInputMessageContentList = Array; + +export interface ResponseInputMessageItem { + /** + * The unique ID of the message input. + */ + id: string; + + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`. + */ + type?: 'message'; +} + +/** + * A text input to the model. + */ +export interface ResponseInputText { + /** + * The text input to the model. + */ + text: string; + + /** + * The type of the input item. Always `input_text`. + */ + type: 'input_text'; +} + +/** + * Content item used to generate a response. + */ +export type ResponseItem = + | ResponseInputMessageItem + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseComputerToolCall + | ResponseComputerToolCallOutputItem + | ResponseFunctionWebSearch + | ResponseFunctionToolCallItem + | ResponseFunctionToolCallOutputItem; + +/** + * An audio output from the model. + */ +export interface ResponseOutputAudio { + /** + * Base64-encoded audio data from the model. + */ + data: string; + + /** + * The transcript of the audio data from the model. + */ + transcript: string; + + /** + * The type of the output audio. Always `output_audio`. + */ + type: 'output_audio'; +} + +/** + * An output message from the model. + */ +export type ResponseOutputItem = + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseFunctionToolCall + | ResponseFunctionWebSearch + | ResponseComputerToolCall + | ResponseReasoningItem; + +/** + * Emitted when a new output item is added. + */ +export interface ResponseOutputItemAddedEvent { + /** + * The output item that was added. + */ + item: ResponseOutputItem; + + /** + * The index of the output item that was added. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_item.added`. + */ + type: 'response.output_item.added'; +} + +/** + * Emitted when an output item is marked done. + */ +export interface ResponseOutputItemDoneEvent { + /** + * The output item that was marked done. 
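A sketch of the round trip `FunctionCallOutput` supports: returning tool results after the model emits a `function_call`. The IDs and payload are placeholders:

```ts
// Minimal sketch: answering a model-generated function_call on the next turn.
// 'resp_123' and 'call_abc' are placeholder IDs, not real values.
import OpenAI from 'openai';

const client = new OpenAI();

const followUp = await client.responses.create({
  model: 'gpt-4o',
  previous_response_id: 'resp_123', // the response that contained the function_call
  input: [
    {
      type: 'function_call_output',
      call_id: 'call_abc', // must match the call_id the model generated
      output: JSON.stringify({ temperature: 21, unit: 'C' }),
    },
  ],
});
```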
+ */
+  item: ResponseOutputItem;
+
+  /**
+   * The index of the output item that was marked done.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.output_item.done`.
+   */
+  type: 'response.output_item.done';
+}
+
+/**
+ * An output message from the model.
+ */
+export interface ResponseOutputMessage {
+  /**
+   * The unique ID of the output message.
+   */
+  id: string;
+
+  /**
+   * The content of the output message.
+   */
+  content: Array<ResponseOutputText | ResponseOutputRefusal>;
+
+  /**
+   * The role of the output message. Always `assistant`.
+   */
+  role: 'assistant';
+
+  /**
+   * The status of the message. One of `in_progress`, `completed`, or
+   * `incomplete`. Populated when items are returned via API.
+   */
+  status: 'in_progress' | 'completed' | 'incomplete';
+
+  /**
+   * The type of the output message. Always `message`.
+   */
+  type: 'message';
+}
+
+/**
+ * A refusal from the model.
+ */
+export interface ResponseOutputRefusal {
+  /**
+   * The refusal explanation from the model.
+   */
+  refusal: string;
+
+  /**
+   * The type of the refusal. Always `refusal`.
+   */
+  type: 'refusal';
+}
+
+/**
+ * A text output from the model.
+ */
+export interface ResponseOutputText {
+  /**
+   * The annotations of the text output.
+   */
+  annotations: Array<
+    ResponseOutputText.FileCitation | ResponseOutputText.URLCitation | ResponseOutputText.FilePath
+  >;
+
+  /**
+   * The text output from the model.
+   */
+  text: string;
+
+  /**
+   * The type of the output text. Always `output_text`.
+   */
+  type: 'output_text';
+}
+
+export namespace ResponseOutputText {
+  /**
+   * A citation to a file.
+   */
+  export interface FileCitation {
+    /**
+     * The ID of the file.
+     */
+    file_id: string;
+
+    /**
+     * The index of the file in the list of files.
+     */
+    index: number;
+
+    /**
+     * The type of the file citation. Always `file_citation`.
+     */
+    type: 'file_citation';
+  }
+
+  /**
+   * A citation for a web resource used to generate a model response.
+   */
+  export interface URLCitation {
+    /**
+     * The index of the last character of the URL citation in the message.
+     */
+    end_index: number;
+
+    /**
+     * The index of the first character of the URL citation in the message.
+     */
+    start_index: number;
+
+    /**
+     * The title of the web resource.
+     */
+    title: string;
+
+    /**
+     * The type of the URL citation. Always `url_citation`.
+     */
+    type: 'url_citation';
+
+    /**
+     * The URL of the web resource.
+     */
+    url: string;
+  }
+
+  /**
+   * A path to a file.
+   */
+  export interface FilePath {
+    /**
+     * The ID of the file.
+     */
+    file_id: string;
+
+    /**
+     * The index of the file in the list of files.
+     */
+    index: number;
+
+    /**
+     * The type of the file path. Always `file_path`.
+     */
+    type: 'file_path';
+  }
+}
+
+/**
+ * A description of the chain of thought used by a reasoning model while generating
+ * a response. Be sure to include these items in your `input` to the Responses API
+ * for subsequent turns of a conversation if you are manually
+ * [managing context](https://platform.openai.com/docs/guides/conversation-state).
+ */
+export interface ResponseReasoningItem {
+  /**
+   * The unique identifier of the reasoning content.
+   */
+  id: string;
+
+  /**
+   * Reasoning text contents.
+   */
+  summary: Array<ResponseReasoningItem.Summary>;
+
+  /**
+   * The type of the object. Always `reasoning`.
+   */
+  type: 'reasoning';
+
+  /**
+   * The encrypted content of the reasoning item - populated when a response is
+   * generated with `reasoning.encrypted_content` in the `include` parameter.
+   */
+  encrypted_content?: string | null;
+
+  /**
+   * The status of the item.
One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +export namespace ResponseReasoningItem { + export interface Summary { + /** + * A short summary of the reasoning used by the model when generating the response. + */ + text: string; + + /** + * The type of the object. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a new reasoning summary part is added. + */ +export interface ResponseReasoningSummaryPartAddedEvent { + /** + * The ID of the item this summary part is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary part is associated with. + */ + output_index: number; + + /** + * The summary part that was added. + */ + part: ResponseReasoningSummaryPartAddedEvent.Part; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_part.added`. + */ + type: 'response.reasoning_summary_part.added'; +} + +export namespace ResponseReasoningSummaryPartAddedEvent { + /** + * The summary part that was added. + */ + export interface Part { + /** + * The text of the summary part. + */ + text: string; + + /** + * The type of the summary part. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a reasoning summary part is completed. + */ +export interface ResponseReasoningSummaryPartDoneEvent { + /** + * The ID of the item this summary part is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary part is associated with. + */ + output_index: number; + + /** + * The completed summary part. + */ + part: ResponseReasoningSummaryPartDoneEvent.Part; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_part.done`. + */ + type: 'response.reasoning_summary_part.done'; +} + +export namespace ResponseReasoningSummaryPartDoneEvent { + /** + * The completed summary part. + */ + export interface Part { + /** + * The text of the summary part. + */ + text: string; + + /** + * The type of the summary part. Always `summary_text`. + */ + type: 'summary_text'; + } +} + +/** + * Emitted when a delta is added to a reasoning summary text. + */ +export interface ResponseReasoningSummaryTextDeltaEvent { + /** + * The text delta that was added to the summary. + */ + delta: string; + + /** + * The ID of the item this summary text delta is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary text delta is associated with. + */ + output_index: number; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The type of the event. Always `response.reasoning_summary_text.delta`. + */ + type: 'response.reasoning_summary_text.delta'; +} + +/** + * Emitted when a reasoning summary text is completed. + */ +export interface ResponseReasoningSummaryTextDoneEvent { + /** + * The ID of the item this summary text is associated with. + */ + item_id: string; + + /** + * The index of the output item this summary text is associated with. + */ + output_index: number; + + /** + * The index of the summary part within the reasoning summary. + */ + summary_index: number; + + /** + * The full text of the completed reasoning summary. 
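A sketch of how the reasoning-summary events above arrive when streaming; the model and prompt are illustrative:

```ts
// Minimal sketch: requesting reasoning summaries and printing summary deltas.
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.responses.create({
  model: 'o3',
  input: 'How many prime numbers are there below 50?',
  reasoning: { effort: 'medium', summary: 'auto' },
  stream: true,
});

for await (const event of stream) {
  if (event.type === 'response.reasoning_summary_text.delta') {
    process.stdout.write(event.delta); // ResponseReasoningSummaryTextDeltaEvent
  }
}
```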
+ */ + text: string; + + /** + * The type of the event. Always `response.reasoning_summary_text.done`. + */ + type: 'response.reasoning_summary_text.done'; +} + +/** + * Emitted when there is a partial refusal text. + */ +export interface ResponseRefusalDeltaEvent { + /** + * The index of the content part that the refusal text is added to. + */ + content_index: number; + + /** + * The refusal text that is added. + */ + delta: string; + + /** + * The ID of the output item that the refusal text is added to. + */ + item_id: string; + + /** + * The index of the output item that the refusal text is added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.refusal.delta`. + */ + type: 'response.refusal.delta'; +} + +/** + * Emitted when refusal text is finalized. + */ +export interface ResponseRefusalDoneEvent { + /** + * The index of the content part that the refusal text is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the refusal text is finalized. + */ + item_id: string; + + /** + * The index of the output item that the refusal text is finalized. + */ + output_index: number; + + /** + * The refusal text that is finalized. + */ + refusal: string; + + /** + * The type of the event. Always `response.refusal.done`. + */ + type: 'response.refusal.done'; +} + +/** + * The status of the response generation. One of `completed`, `failed`, + * `in_progress`, or `incomplete`. + */ +export type ResponseStatus = 'completed' | 'failed' | 'in_progress' | 'incomplete'; + +/** + * Emitted when there is a partial audio response. + */ +export type ResponseStreamEvent = + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseReasoningSummaryPartAddedEvent + | ResponseReasoningSummaryPartDoneEvent + | ResponseReasoningSummaryTextDeltaEvent + | ResponseReasoningSummaryTextDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; + +/** + * Emitted when a text annotation is added. + */ +export interface ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. + */ + annotation: + | ResponseTextAnnotationDeltaEvent.FileCitation + | ResponseTextAnnotationDeltaEvent.URLCitation + | ResponseTextAnnotationDeltaEvent.FilePath; + + /** + * The index of the annotation that was added. + */ + annotation_index: number; + + /** + * The index of the content part that the text annotation was added to. + */ + content_index: number; + + /** + * The ID of the output item that the text annotation was added to. 
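`ResponseStreamEvent` is a discriminated union on `type`, so a `switch` narrows each event to its concrete interface. A minimal consumption sketch (prompt is illustrative):

```ts
// Minimal sketch: narrowing the ResponseStreamEvent union while streaming.
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.responses.create({
  model: 'gpt-4o',
  input: 'Write a haiku about discriminated unions.',
  stream: true,
});

for await (const event of stream) {
  switch (event.type) {
    case 'response.output_text.delta':
      process.stdout.write(event.delta); // ResponseTextDeltaEvent
      break;
    case 'response.refusal.delta':
      process.stdout.write(event.delta); // ResponseRefusalDeltaEvent
      break;
    case 'response.completed':
      console.log('\ntotal tokens:', event.response.usage?.total_tokens);
      break;
  }
}
```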
+ */ + item_id: string; + + /** + * The index of the output item that the text annotation was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.annotation.added`. + */ + type: 'response.output_text.annotation.added'; +} + +export namespace ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. + */ + export interface FileCitation { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file citation. Always `file_citation`. + */ + type: 'file_citation'; + } + + /** + * A citation for a web resource used to generate a model response. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * The URL of the web resource. + */ + url: string; + } + + /** + * A path to a file. + */ + export interface FilePath { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file path. Always `file_path`. + */ + type: 'file_path'; + } +} + +/** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ +export interface ResponseTextConfig { + /** + * An object specifying the format that the model must output. + * + * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + * ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * The default format is `{ "type": "text" }` with no additional options. + * + * **Not recommended for gpt-4o and newer models:** + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. + */ + format?: ResponseFormatTextConfig; +} + +/** + * Emitted when there is an additional text delta. + */ +export interface ResponseTextDeltaEvent { + /** + * The index of the content part that the text delta was added to. + */ + content_index: number; + + /** + * The text delta that was added. + */ + delta: string; + + /** + * The ID of the output item that the text delta was added to. + */ + item_id: string; + + /** + * The index of the output item that the text delta was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.delta`. + */ + type: 'response.output_text.delta'; +} + +/** + * Emitted when text content is finalized. + */ +export interface ResponseTextDoneEvent { + /** + * The index of the content part that the text content is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the text content is finalized. + */ + item_id: string; + + /** + * The index of the output item that the text content is finalized. 
+ */ + output_index: number; + + /** + * The text content that is finalized. + */ + text: string; + + /** + * The type of the event. Always `response.output_text.done`. + */ + type: 'response.output_text.done'; +} + +/** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ +export interface ResponseUsage { + /** + * The number of input tokens. + */ + input_tokens: number; + + /** + * A detailed breakdown of the input tokens. + */ + input_tokens_details: ResponseUsage.InputTokensDetails; + + /** + * The number of output tokens. + */ + output_tokens: number; + + /** + * A detailed breakdown of the output tokens. + */ + output_tokens_details: ResponseUsage.OutputTokensDetails; + + /** + * The total number of tokens used. + */ + total_tokens: number; +} + +export namespace ResponseUsage { + /** + * A detailed breakdown of the input tokens. + */ + export interface InputTokensDetails { + /** + * The number of tokens that were retrieved from the cache. + * [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + */ + cached_tokens: number; + } + + /** + * A detailed breakdown of the output tokens. + */ + export interface OutputTokensDetails { + /** + * The number of reasoning tokens. + */ + reasoning_tokens: number; + } +} + +/** + * Emitted when a web search call is completed. + */ +export interface ResponseWebSearchCallCompletedEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.completed`. + */ + type: 'response.web_search_call.completed'; +} + +/** + * Emitted when a web search call is initiated. + */ +export interface ResponseWebSearchCallInProgressEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.in_progress`. + */ + type: 'response.web_search_call.in_progress'; +} + +/** + * Emitted when a web search call is executing. + */ +export interface ResponseWebSearchCallSearchingEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.searching`. + */ + type: 'response.web_search_call.searching'; +} + +/** + * A tool that can be used to generate a response. + */ +export type Tool = FileSearchTool | FunctionTool | WebSearchTool | ComputerTool; + +/** + * Use this option to force the model to call a specific function. + */ +export interface ToolChoiceFunction { + /** + * The name of the function to call. + */ + name: string; + + /** + * For function calling, the type is always `function`. + */ + type: 'function'; +} + +/** + * Controls which (if any) tool is called by the model. + * + * `none` means the model will not call any tool and instead generates a message. + * + * `auto` means the model can pick between generating a message or calling one or + * more tools. + * + * `required` means the model must call one or more tools. 
+ */
+export type ToolChoiceOptions = 'none' | 'auto' | 'required';
+
+/**
+ * Indicates that the model should use a built-in tool to generate a response.
+ * [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
+ */
+export interface ToolChoiceTypes {
+  /**
+   * The type of hosted tool the model should use. Learn more about
+   * [built-in tools](https://platform.openai.com/docs/guides/tools).
+   *
+   * Allowed values are:
+   *
+   * - `file_search`
+   * - `web_search_preview`
+   * - `computer_use_preview`
+   */
+  type: 'file_search' | 'web_search_preview' | 'computer_use_preview' | 'web_search_preview_2025_03_11';
+}
+
+/**
+ * This tool searches the web for relevant results to use in a response. Learn more
+ * about the
+ * [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
+ */
+export interface WebSearchTool {
+  /**
+   * The type of the web search tool. One of `web_search_preview` or
+   * `web_search_preview_2025_03_11`.
+   */
+  type: 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+  /**
+   * High level guidance for the amount of context window space to use for the
+   * search. One of `low`, `medium`, or `high`. `medium` is the default.
+   */
+  search_context_size?: 'low' | 'medium' | 'high';
+
+  /**
+   * The user's location.
+   */
+  user_location?: WebSearchTool.UserLocation | null;
+}
+
+export namespace WebSearchTool {
+  /**
+   * The user's location.
+   */
+  export interface UserLocation {
+    /**
+     * The type of location approximation. Always `approximate`.
+     */
+    type: 'approximate';
+
+    /**
+     * Free text input for the city of the user, e.g. `San Francisco`.
+     */
+    city?: string | null;
+
+    /**
+     * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+     * the user, e.g. `US`.
+     */
+    country?: string | null;
+
+    /**
+     * Free text input for the region of the user, e.g. `California`.
+     */
+    region?: string | null;
+
+    /**
+     * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+     * user, e.g. `America/Los_Angeles`.
+     */
+    timezone?: string | null;
+  }
+}
+
+export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming;
+
+export interface ResponseCreateParamsBase {
+  /**
+   * Text, image, or file inputs to the model, used to generate a response.
+   *
+   * Learn more:
+   *
+   * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+   * - [Image inputs](https://platform.openai.com/docs/guides/images)
+   * - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+   * - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+   * - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+   */
+  input: string | ResponseInput;
+
+  /**
+   * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+   * wide range of models with different capabilities, performance characteristics,
+   * and price points. Refer to the
+   * [model guide](https://platform.openai.com/docs/models) to browse and compare
+   * available models.
+   */
+  model: Shared.ResponsesModel;
+
+  /**
+   * Specify additional output data to include in the model response. Currently
+   * supported values are:
+   *
+   * - `file_search_call.results`: Include the search results of the file search tool
+   *   call.
+   * - `message.input_image.image_url`: Include image urls from the input message.
+   * - `computer_call_output.output.image_url`: Include image urls from the computer
+   *   call output.
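A sketch tying `WebSearchTool` and `ToolChoiceTypes` together; the location values and prompt are illustrative:

```ts
// Minimal sketch: forcing the built-in web search tool via tool_choice.
import OpenAI from 'openai';

const client = new OpenAI();

const response = await client.responses.create({
  model: 'gpt-4o',
  input: 'What was a positive news story from today?',
  tools: [
    {
      type: 'web_search_preview',
      search_context_size: 'medium', // the default; shown for illustration
      user_location: { type: 'approximate', city: 'San Francisco', country: 'US' },
    },
  ],
  tool_choice: { type: 'web_search_preview' }, // a ToolChoiceTypes value
});
```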
+   * - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+   *   tokens in reasoning item outputs. This enables reasoning items to be used in
+   *   multi-turn conversations when using the Responses API statelessly (like when
+   *   the `store` parameter is set to `false`, or when an organization is enrolled
+   *   in the zero data retention program).
+   */
+  include?: Array<ResponseIncludable> | null;
+
+  /**
+   * Inserts a system (or developer) message as the first item in the model's
+   * context.
+   *
+   * When using along with `previous_response_id`, the instructions from a previous
+   * response will not be carried over to the next response. This makes it simple to
+   * swap out system (or developer) messages in new responses.
+   */
+  instructions?: string | null;
+
+  /**
+   * An upper bound for the number of tokens that can be generated for a response,
+   * including visible output tokens and
+   * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+   */
+  max_output_tokens?: number | null;
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful
+   * for storing additional information about the object in a structured format, and
+   * querying for objects via API or the dashboard.
+   *
+   * Keys are strings with a maximum length of 64 characters. Values are strings with
+   * a maximum length of 512 characters.
+   */
+  metadata?: Shared.Metadata | null;
+
+  /**
+   * Whether to allow the model to run tool calls in parallel.
+   */
+  parallel_tool_calls?: boolean | null;
+
+  /**
+   * The unique ID of the previous response to the model. Use this to create
+   * multi-turn conversations. Learn more about
+   * [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+   */
+  previous_response_id?: string | null;
+
+  /**
+   * **o-series models only**
+   *
+   * Configuration options for
+   * [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+   */
+  reasoning?: Shared.Reasoning | null;
+
+  /**
+   * Specifies the latency tier to use for processing the request. This parameter is
+   * relevant for customers subscribed to the scale tier service:
+   *
+   * - If set to 'auto', and the Project is Scale tier enabled, the system will
+   *   utilize scale tier credits until they are exhausted.
+   * - If set to 'auto', and the Project is not Scale tier enabled, the request will
+   *   be processed using the default service tier with a lower uptime SLA and no
+   *   latency guarantee.
+   * - If set to 'default', the request will be processed using the default service
+   *   tier with a lower uptime SLA and no latency guarantee.
+   * - If set to 'flex', the request will be processed with the Flex Processing
+   *   service tier.
+   *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+   * - When not set, the default behavior is 'auto'.
+   *
+   * When this parameter is set, the response body will include the `service_tier`
+   * utilized.
+   */
+  service_tier?: 'auto' | 'default' | 'flex' | null;
+
+  /**
+   * Whether to store the generated model response for later retrieval via API.
+   */
+  store?: boolean | null;
+
+  /**
+   * If set to true, the model response data will be streamed to the client as it is
+   * generated using
+   * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+   * See the
+   * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+   * for more information.
+ */ + stream?: boolean | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature?: number | null; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice?: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools?: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. + */ + truncation?: 'auto' | 'disabled' | null; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; +} + +export namespace ResponseCreateParams { + export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming; + export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming; +} + +export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. 
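A sketch of a two-turn exchange exercising several of these parameters (`previous_response_id`, `instructions`, `max_output_tokens`, `truncation`); the prompts are illustrative:

```ts
// Minimal sketch: multi-turn conversation state via previous_response_id.
import OpenAI from 'openai';

const client = new OpenAI();

const first = await client.responses.create({
  model: 'gpt-4o',
  input: 'What is the weather forecast for Paris this weekend?',
});

const second = await client.responses.create({
  model: 'gpt-4o',
  previous_response_id: first.id, // carries conversation state forward
  instructions: 'Answer in one short sentence.', // not inherited from the first turn
  max_output_tokens: 200,
  truncation: 'auto', // drop middle items rather than failing with a 400
  input: 'And what about next week?',
});
```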
+ */ + stream?: false | null; +} + +export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream: true; +} + +export interface ResponseRetrieveParams { + /** + * Additional fields to include in the response. See the `include` parameter for + * Response creation above for more information. + */ + include?: Array; +} + +Responses.InputItems = InputItems; + +export declare namespace Responses { + export { + type ComputerTool as ComputerTool, + type EasyInputMessage as EasyInputMessage, + type FileSearchTool as FileSearchTool, + type FunctionTool as FunctionTool, + type Response as Response, + type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, + type ResponseAudioDoneEvent as ResponseAudioDoneEvent, + type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, + type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, + type ResponseCodeInterpreterCallCodeDeltaEvent as ResponseCodeInterpreterCallCodeDeltaEvent, + type ResponseCodeInterpreterCallCodeDoneEvent as ResponseCodeInterpreterCallCodeDoneEvent, + type ResponseCodeInterpreterCallCompletedEvent as ResponseCodeInterpreterCallCompletedEvent, + type ResponseCodeInterpreterCallInProgressEvent as ResponseCodeInterpreterCallInProgressEvent, + type ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent, + type ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall, + type ResponseCompletedEvent as ResponseCompletedEvent, + type ResponseComputerToolCall as ResponseComputerToolCall, + type ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, + type ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot, + type ResponseContent as ResponseContent, + type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, + type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, + type ResponseCreatedEvent as ResponseCreatedEvent, + type ResponseError as ResponseError, + type ResponseErrorEvent as ResponseErrorEvent, + type ResponseFailedEvent as ResponseFailedEvent, + type ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent, + type ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent, + type ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent, + type ResponseFileSearchToolCall as ResponseFileSearchToolCall, + type ResponseFormatTextConfig as ResponseFormatTextConfig, + type ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig, + type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, + type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, + type ResponseFunctionToolCall as ResponseFunctionToolCall, + type ResponseFunctionToolCallItem as ResponseFunctionToolCallItem, + type ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem, + type ResponseFunctionWebSearch as ResponseFunctionWebSearch, + type ResponseInProgressEvent as ResponseInProgressEvent, + type ResponseIncludable as ResponseIncludable, + type 
ResponseIncompleteEvent as ResponseIncompleteEvent, + type ResponseInput as ResponseInput, + type ResponseInputAudio as ResponseInputAudio, + type ResponseInputContent as ResponseInputContent, + type ResponseInputFile as ResponseInputFile, + type ResponseInputImage as ResponseInputImage, + type ResponseInputItem as ResponseInputItem, + type ResponseInputMessageContentList as ResponseInputMessageContentList, + type ResponseInputMessageItem as ResponseInputMessageItem, + type ResponseInputText as ResponseInputText, + type ResponseItem as ResponseItem, + type ResponseOutputAudio as ResponseOutputAudio, + type ResponseOutputItem as ResponseOutputItem, + type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, + type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, + type ResponseOutputMessage as ResponseOutputMessage, + type ResponseOutputRefusal as ResponseOutputRefusal, + type ResponseOutputText as ResponseOutputText, + type ResponseReasoningItem as ResponseReasoningItem, + type ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent, + type ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent, + type ResponseReasoningSummaryTextDeltaEvent as ResponseReasoningSummaryTextDeltaEvent, + type ResponseReasoningSummaryTextDoneEvent as ResponseReasoningSummaryTextDoneEvent, + type ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent, + type ResponseRefusalDoneEvent as ResponseRefusalDoneEvent, + type ResponseStatus as ResponseStatus, + type ResponseStreamEvent as ResponseStreamEvent, + type ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent, + type ResponseTextConfig as ResponseTextConfig, + type ResponseTextDeltaEvent as ResponseTextDeltaEvent, + type ResponseTextDoneEvent as ResponseTextDoneEvent, + type ResponseUsage as ResponseUsage, + type ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent, + type ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent, + type ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent, + type Tool as Tool, + type ToolChoiceFunction as ToolChoiceFunction, + type ToolChoiceOptions as ToolChoiceOptions, + type ToolChoiceTypes as ToolChoiceTypes, + type WebSearchTool as WebSearchTool, + type ResponseCreateParams as ResponseCreateParams, + type ResponseCreateParamsNonStreaming as ResponseCreateParamsNonStreaming, + type ResponseCreateParamsStreaming as ResponseCreateParamsStreaming, + type ResponseRetrieveParams as ResponseRetrieveParams, + }; + + export { + InputItems as InputItems, + type ResponseItemList as ResponseItemList, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/shared.ts b/src/resources/shared.ts index f44fda8a7..adea184fd 100644 --- a/src/resources/shared.ts +++ b/src/resources/shared.ts @@ -1,5 +1,114 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+export type AllModels = + | (string & {}) + | ChatModel + | 'o1-pro' + | 'o1-pro-2025-03-19' + | 'computer-use-preview' + | 'computer-use-preview-2025-03-11'; + +export type ChatModel = + | 'gpt-4.1' + | 'gpt-4.1-mini' + | 'gpt-4.1-nano' + | 'gpt-4.1-2025-04-14' + | 'gpt-4.1-mini-2025-04-14' + | 'gpt-4.1-nano-2025-04-14' + | 'o4-mini' + | 'o4-mini-2025-04-16' + | 'o3' + | 'o3-2025-04-16' + | 'o3-mini' + | 'o3-mini-2025-01-31' + | 'o1' + | 'o1-2024-12-17' + | 'o1-preview' + | 'o1-preview-2024-09-12' + | 'o1-mini' + | 'o1-mini-2024-09-12' + | 'gpt-4o' + | 'gpt-4o-2024-11-20' + | 'gpt-4o-2024-08-06' + | 'gpt-4o-2024-05-13' + | 'gpt-4o-audio-preview' + | 'gpt-4o-audio-preview-2024-10-01' + | 'gpt-4o-audio-preview-2024-12-17' + | 'gpt-4o-mini-audio-preview' + | 'gpt-4o-mini-audio-preview-2024-12-17' + | 'gpt-4o-search-preview' + | 'gpt-4o-mini-search-preview' + | 'gpt-4o-search-preview-2025-03-11' + | 'gpt-4o-mini-search-preview-2025-03-11' + | 'chatgpt-4o-latest' + | 'codex-mini-latest' + | 'gpt-4o-mini' + | 'gpt-4o-mini-2024-07-18' + | 'gpt-4-turbo' + | 'gpt-4-turbo-2024-04-09' + | 'gpt-4-0125-preview' + | 'gpt-4-turbo-preview' + | 'gpt-4-1106-preview' + | 'gpt-4-vision-preview' + | 'gpt-4' + | 'gpt-4-0314' + | 'gpt-4-0613' + | 'gpt-4-32k' + | 'gpt-4-32k-0314' + | 'gpt-4-32k-0613' + | 'gpt-3.5-turbo' + | 'gpt-3.5-turbo-16k' + | 'gpt-3.5-turbo-0301' + | 'gpt-3.5-turbo-0613' + | 'gpt-3.5-turbo-1106' + | 'gpt-3.5-turbo-0125' + | 'gpt-3.5-turbo-16k-0613'; + +/** + * A filter used to compare a specified attribute key to a given value using a + * defined comparison operation. + */ +export interface ComparisonFilter { + /** + * The key to compare against the value. + */ + key: string; + + /** + * Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + * + * - `eq`: equals + * - `ne`: not equal + * - `gt`: greater than + * - `gte`: greater than or equal + * - `lt`: less than + * - `lte`: less than or equal + */ + type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte'; + + /** + * The value to compare against the attribute key; supports string, number, or + * boolean types. + */ + value: string | number | boolean; +} + +/** + * Combine multiple filters using `and` or `or`. + */ +export interface CompoundFilter { + /** + * Array of filters to combine. Items can be `ComparisonFilter` or + * `CompoundFilter`. + */ + filters: Array; + + /** + * Type of operation: `and` or `or`. + */ + type: 'and' | 'or'; +} + export interface ErrorObject { code: string | null; @@ -55,23 +164,93 @@ export interface FunctionDefinition { */ export type FunctionParameters = Record; +/** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ +export type Metadata = Record; + +/** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ +export interface Reasoning { + /** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. 
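A sketch combining the two filter shapes defined above; the attribute keys and the import path are assumptions for illustration:

```ts
// Minimal sketch: region == "us" AND year >= 2024, using the shared filter types.
import type { ComparisonFilter, CompoundFilter } from 'openai/resources/shared';

const region: ComparisonFilter = { key: 'region', type: 'eq', value: 'us' };
const year: ComparisonFilter = { key: 'year', type: 'gte', value: 2024 };

const filter: CompoundFilter = { type: 'and', filters: [region, year] };
```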
+ */ + effort?: ReasoningEffort | null; + + /** + * @deprecated **Deprecated:** use `summary` instead. + * + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `auto`, + * `concise`, or `detailed`. + */ + generate_summary?: 'auto' | 'concise' | 'detailed' | null; + + /** + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `auto`, + * `concise`, or `detailed`. + */ + summary?: 'auto' | 'concise' | 'detailed' | null; +} + +/** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ +export type ReasoningEffort = 'low' | 'medium' | 'high' | null; + +/** + * JSON object response format. An older method of generating JSON responses. Using + * `json_schema` is recommended for models that support it. Note that the model + * will not generate JSON without a system or user message instructing it to do so. + */ export interface ResponseFormatJSONObject { /** - * The type of response format being defined: `json_object` + * The type of response format being defined. Always `json_object`. */ type: 'json_object'; } +/** + * JSON Schema response format. Used to generate structured JSON responses. Learn + * more about + * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + */ export interface ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ json_schema: ResponseFormatJSONSchema.JSONSchema; /** - * The type of response format being defined: `json_schema` + * The type of response format being defined. Always `json_schema`. */ type: 'json_schema'; } export namespace ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ export interface JSONSchema { /** * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores @@ -86,7 +265,8 @@ export namespace ResponseFormatJSONSchema { description?: string; /** - * The schema for the response format, described as a JSON Schema object. + * The schema for the response format, described as a JSON Schema object. Learn how + * to build JSON schemas [here](https://json-schema.org/). */ schema?: Record; @@ -101,9 +281,20 @@ export namespace ResponseFormatJSONSchema { } } +/** + * Default response format. Used to generate text responses. + */ export interface ResponseFormatText { /** - * The type of response format being defined: `text` + * The type of response format being defined. Always `text`. */ type: 'text'; } + +export type ResponsesModel = + | (string & {}) + | ChatModel + | 'o1-pro' + | 'o1-pro-2025-03-19' + | 'computer-use-preview' + | 'computer-use-preview-2025-03-11'; diff --git a/src/resources/uploads/index.ts b/src/resources/uploads/index.ts index 1a353d312..200d3567e 100644 --- a/src/resources/uploads/index.ts +++ b/src/resources/uploads/index.ts @@ -1,4 +1,4 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { Upload, UploadCreateParams, UploadCompleteParams, Uploads } from './uploads'; -export { UploadPart, PartCreateParams, Parts } from './parts'; +export { Parts, type UploadPart, type PartCreateParams } from './parts'; +export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads'; diff --git a/src/resources/uploads/parts.ts b/src/resources/uploads/parts.ts index a4af5c606..9b54c99e6 100644 --- a/src/resources/uploads/parts.ts +++ b/src/resources/uploads/parts.ts @@ -2,7 +2,6 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as PartsAPI from './parts'; export class Parts extends APIResource { /** @@ -62,7 +61,6 @@ export interface PartCreateParams { data: Core.Uploadable; } -export namespace Parts { - export import UploadPart = PartsAPI.UploadPart; - export import PartCreateParams = PartsAPI.PartCreateParams; +export declare namespace Parts { + export { type UploadPart as UploadPart, type PartCreateParams as PartCreateParams }; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index 1c3ed708d..9e046b48d 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -2,9 +2,9 @@ import { APIResource } from '../../resource'; import * as Core from '../../core'; -import * as UploadsAPI from './uploads'; import * as FilesAPI from '../files'; import * as PartsAPI from './parts'; +import { PartCreateParams, Parts, UploadPart } from './parts'; export class Uploads extends APIResource { parts: PartsAPI.Parts = new PartsAPI.Parts(this._client); @@ -22,10 +22,9 @@ export class Uploads extends APIResource { * contains all the parts you uploaded. This File is usable in the rest of our * platform as a regular File object. * - * For certain `purpose`s, the correct `mime_type` must be specified. Please refer - * to documentation for the supported MIME types for your use case: - * - * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files) + * For certain `purpose` values, the correct `mime_type` must be specified. Please + * refer to documentation for the + * [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). * * For guidance on the proper filename extensions for each purpose, please follow * the documentation on @@ -86,7 +85,7 @@ export interface Upload { created_at: number; /** - * The Unix timestamp (in seconds) for when the Upload was created. + * The Unix timestamp (in seconds) for when the Upload will expire. */ expires_at: number; @@ -113,7 +112,7 @@ export interface Upload { status: 'pending' | 'completed' | 'cancelled' | 'expired'; /** - * The ready File object after the Upload is completed. + * The `File` object represents a document that has been uploaded to OpenAI. 
*/ file?: FilesAPI.FileObject | null; } @@ -159,11 +158,14 @@ export interface UploadCompleteParams { md5?: string; } -export namespace Uploads { - export import Upload = UploadsAPI.Upload; - export import UploadCreateParams = UploadsAPI.UploadCreateParams; - export import UploadCompleteParams = UploadsAPI.UploadCompleteParams; - export import Parts = PartsAPI.Parts; - export import UploadPart = PartsAPI.UploadPart; - export import PartCreateParams = PartsAPI.PartCreateParams; +Uploads.Parts = Parts; + +export declare namespace Uploads { + export { + type Upload as Upload, + type UploadCreateParams as UploadCreateParams, + type UploadCompleteParams as UploadCompleteParams, + }; + + export { Parts as Parts, type UploadPart as UploadPart, type PartCreateParams as PartCreateParams }; } diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts similarity index 88% rename from src/resources/beta/vector-stores/file-batches.ts rename to src/resources/vector-stores/file-batches.ts index 3436d7575..9be1d81a3 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -1,16 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { isRequestOptions } from '../../../core'; -import { sleep } from '../../../core'; -import { Uploadable } from '../../../core'; -import { allSettledWithThrow } from '../../../lib/Util'; -import * as Core from '../../../core'; -import * as FileBatchesAPI from './file-batches'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import { sleep } from '../../core'; +import { Uploadable } from '../../core'; +import { allSettledWithThrow } from '../../lib/Util'; +import * as Core from '../../core'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; import * as VectorStoresAPI from './vector-stores'; -import { type CursorPageParams } from '../../../pagination'; +import { type CursorPageParams } from '../../pagination'; export class FileBatches extends APIResource { /** @@ -266,6 +265,15 @@ export interface FileBatchCreateParams { */ file_ids: Array; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. @@ -277,8 +285,8 @@ export interface FileBatchListFilesParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. 
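A sketch of the multipart Upload flow documented earlier in this file; the filename, byte count, and part data are placeholders:

```ts
// Minimal sketch: create an Upload, add a Part, then complete it.
import fs from 'node:fs';
import OpenAI from 'openai';

const client = new OpenAI();

const upload = await client.uploads.create({
  purpose: 'fine-tune',
  filename: 'training.jsonl',
  bytes: 104857600, // total size declared up front
  mime_type: 'text/jsonl',
});

const part = await client.uploads.parts.create(upload.id, {
  data: fs.createReadStream('chunk-0.jsonl'), // any Uploadable works here
});

const completed = await client.uploads.complete(upload.id, { part_ids: [part.id] });
console.log(completed.file?.id); // usable as a regular File object
```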
*/ before?: string; @@ -294,10 +302,12 @@ export interface FileBatchListFilesParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace FileBatches { - export import VectorStoreFileBatch = FileBatchesAPI.VectorStoreFileBatch; - export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams; - export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams; +export declare namespace FileBatches { + export { + type VectorStoreFileBatch as VectorStoreFileBatch, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListFilesParams as FileBatchListFilesParams, + }; } export { VectorStoreFilesPage }; diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/vector-stores/files.ts similarity index 70% rename from src/resources/beta/vector-stores/files.ts rename to src/resources/vector-stores/files.ts index f82cd63df..28caf9781 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -1,11 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { sleep, Uploadable, isRequestOptions } from '../../../core'; -import * as Core from '../../../core'; -import * as FilesAPI from './files'; +import { APIResource } from '../../resource'; +import { sleep, Uploadable, isRequestOptions } from '../../core'; +import * as Core from '../../core'; import * as VectorStoresAPI from './vector-stores'; -import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { CursorPage, type CursorPageParams, Page } from '../../pagination'; export class Files extends APIResource { /** @@ -39,6 +38,22 @@ export class Files extends APIResource { }); } + /** + * Update attributes on a vector store file. + */ + update( + vectorStoreId: string, + fileId: string, + body: FileUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise<VectorStoreFile> { + return this._client.post(`/vector_stores/${vectorStoreId}/files/${fileId}`, { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + /** * Returns a list of vector store files. */ @@ -168,10 +183,30 @@ export class Files extends APIResource { const fileInfo = await this.upload(vectorStoreId, file, options); return await this.poll(vectorStoreId, fileInfo.id, options); } + + /** + * Retrieve the parsed contents of a vector store file. + */ + content( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.PagePromise<FileContentResponsesPage, FileContentResponse> { + return this._client.getAPIList( + `/vector_stores/${vectorStoreId}/files/${fileId}/content`, + FileContentResponsesPage, + { ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers } }, + ); + } } export class VectorStoreFilesPage extends CursorPage<VectorStoreFile> {} +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class FileContentResponsesPage extends Page<FileContentResponse> {} + /** * A list of files attached to a vector store. */ @@ -218,6 +253,15 @@ export interface VectorStoreFile { */ vector_store_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers.
+ */ + attributes?: Record<string, string | boolean | number> | null; + /** * The strategy used to chunk the file. */ @@ -250,6 +294,18 @@ export interface VectorStoreFileDeleted { object: 'vector_store.file.deleted'; } +export interface FileContentResponse { + /** + * The text content + */ + text?: string; + + /** + * The content type (currently only `"text"`) + */ + type?: string; +} + export interface FileCreateParams { /** * A [File](https://platform.openai.com/docs/api-reference/files) ID that the @@ -258,6 +314,15 @@ export interface FileCreateParams { */ file_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record<string, string | boolean | number> | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. @@ -265,12 +330,23 @@ chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; } +export interface FileUpdateParams { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes: Record<string, string | boolean | number> | null; +} + export interface FileListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; @@ -286,10 +362,18 @@ export interface FileListParams extends CursorPageParams { order?: 'asc' | 'desc'; } -export namespace Files { - export import VectorStoreFile = FilesAPI.VectorStoreFile; - export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted; - export import VectorStoreFilesPage = FilesAPI.VectorStoreFilesPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; +Files.VectorStoreFilesPage = VectorStoreFilesPage; +Files.FileContentResponsesPage = FileContentResponsesPage; + +export declare namespace Files { + export { + type VectorStoreFile as VectorStoreFile, + type VectorStoreFileDeleted as VectorStoreFileDeleted, + type FileContentResponse as FileContentResponse, + VectorStoreFilesPage as VectorStoreFilesPage, + FileContentResponsesPage as FileContentResponsesPage, + type FileCreateParams as FileCreateParams, + type FileUpdateParams as FileUpdateParams, + type FileListParams as FileListParams, + }; } diff --git a/src/resources/vector-stores/index.ts b/src/resources/vector-stores/index.ts new file mode 100644 index 000000000..9cbcbc0b2 --- /dev/null +++ b/src/resources/vector-stores/index.ts @@ -0,0 +1,38 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +export { + FileBatches, + type VectorStoreFileBatch, + type FileBatchCreateParams, + type FileBatchListFilesParams, +} from './file-batches'; +export { + VectorStoreFilesPage, + FileContentResponsesPage, + Files, + type VectorStoreFile, + type VectorStoreFileDeleted, + type FileContentResponse, + type FileCreateParams, + type FileUpdateParams, + type FileListParams, +} from './files'; +export { + VectorStoresPage, + VectorStoreSearchResponsesPage, + VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreSearchResponse, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, + type VectorStoreSearchParams, +} from './vector-stores'; diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts similarity index 58% rename from src/resources/beta/vector-stores/vector-stores.ts rename to src/resources/vector-stores/vector-stores.ts index 3c9aa707d..7d61e7fd6 100644 --- a/src/resources/beta/vector-stores/vector-stores.ts +++ b/src/resources/vector-stores/vector-stores.ts @@ -1,12 +1,29 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { isRequestOptions } from '../../../core'; -import * as Core from '../../../core'; -import * as VectorStoresAPI from './vector-stores'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as Shared from '../shared'; import * as FileBatchesAPI from './file-batches'; +import { + FileBatchCreateParams, + FileBatchListFilesParams, + FileBatches, + VectorStoreFileBatch, +} from './file-batches'; import * as FilesAPI from './files'; -import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { + FileContentResponse, + FileContentResponsesPage, + FileCreateParams, + FileListParams, + FileUpdateParams, + Files, + VectorStoreFile, + VectorStoreFileDeleted, + VectorStoreFilesPage, +} from './files'; +import { CursorPage, type CursorPageParams, Page } from '../../pagination'; export class VectorStores extends APIResource { files: FilesAPI.Files = new FilesAPI.Files(this._client); @@ -79,10 +96,32 @@ export class VectorStores extends APIResource { headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, }); } + + /** + * Search a vector store for relevant chunks based on a query and file attributes + * filter. + */ + search( + vectorStoreId: string, + body: VectorStoreSearchParams, + options?: Core.RequestOptions, + ): Core.PagePromise<VectorStoreSearchResponsesPage, VectorStoreSearchResponse> { + return this._client.getAPIList(`/vector_stores/${vectorStoreId}/search`, VectorStoreSearchResponsesPage, { + body, + method: 'post', + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } } export class VectorStoresPage extends CursorPage<VectorStore> {} +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class VectorStoreSearchResponsesPage extends Page<VectorStoreSearchResponse> {} + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of * `800` and `chunk_overlap_tokens` of `400`.
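[Editor's note] The `search()` method added in the hunk above is the headline of the vector-stores move out of beta: it POSTs to `/vector_stores/{vector_store_id}/search` and pages results through `VectorStoreSearchResponsesPage`. A minimal usage sketch, assuming the resource is mounted at `client.vectorStores` after this rename; the store ID and query below are placeholders, not values from this patch:

import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function main() {
  // 'vs_123' is a placeholder vector store ID.
  const page = await client.vectorStores.search('vs_123', {
    query: 'What is our refund policy?',
    max_num_results: 5,
  });
  for (const result of page.data) {
    // Each result carries the source filename, a relevance score, and text chunks.
    console.log(result.filename, result.score);
    for (const chunk of result.content) console.log(chunk.text);
  }
}

main().catch(console.error);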
@@ -103,7 +142,7 @@ export type FileChunkingStrategy = StaticFileChunkingStrategyObject | OtherFileC * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. */ -export type FileChunkingStrategyParam = AutoFileChunkingStrategyParam | StaticFileChunkingStrategyParam; +export type FileChunkingStrategyParam = AutoFileChunkingStrategyParam | StaticFileChunkingStrategyObjectParam; /** * This is returned when the chunking strategy is unknown. Typically, this is @@ -141,7 +180,10 @@ export interface StaticFileChunkingStrategyObject { type: 'static'; } -export interface StaticFileChunkingStrategyParam { +/** + * Customize your own chunking strategy by setting chunk size and chunk overlap. + */ +export interface StaticFileChunkingStrategyObjectParam { static: StaticFileChunkingStrategy; /** @@ -174,11 +216,13 @@ export interface VectorStore { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata: unknown | null; + metadata: Shared.Metadata | null; /** * The name of the vector store. @@ -266,6 +310,51 @@ export interface VectorStoreDeleted { object: 'vector_store.deleted'; } +export interface VectorStoreSearchResponse { + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes: Record<string, string | boolean | number> | null; + + /** + * Content chunks from the file. + */ + content: Array<VectorStoreSearchResponse.Content>; + + /** + * The ID of the vector store file. + */ + file_id: string; + + /** + * The name of the vector store file. + */ + filename: string; + + /** + * The similarity score for the result. + */ + score: number; +} + +export namespace VectorStoreSearchResponse { + export interface Content { + /** + * The text content returned from search. + */ + text: string; + + /** + * The type of content. + */ + type: 'text'; + } +} + export interface VectorStoreCreateParams { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` @@ -287,11 +376,13 @@ /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the vector store.
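[Editor's note] The hunks above tighten `metadata` from `unknown | null` to the typed `Shared.Metadata | null` and introduce `VectorStoreSearchResponse`. Together with the `attributes` fields added in files.ts and the `filters` parameter defined below, this enables attribute-filtered search. A hedged sketch of the intended flow; all IDs and attribute values are illustrative, not taken from this patch:

import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // `metadata` is now a typed string-to-string map rather than `unknown`.
  const store = await client.vectorStores.create({
    name: 'Support FAQ',
    metadata: { team: 'support' },
  });

  // Attach a file with filterable attributes ('file-abc123' is a placeholder ID).
  await client.vectorStores.files.create(store.id, {
    file_id: 'file-abc123',
    attributes: { language: 'en', priority: 1 },
  });

  // Restrict search results with a comparison filter over those attributes.
  const results = await client.vectorStores.search(store.id, {
    query: 'refund window',
    filters: { type: 'eq', key: 'language', value: 'en' },
  });
  console.log(results.data.length);
}

main().catch(console.error);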
@@ -325,11 +416,13 @@ export interface VectorStoreUpdateParams { /** * Set of 16 key-value pairs that can be attached to an object. This can be useful - * for storing additional information about the object in a structured format. Keys - * can be a maximum of 64 characters long and values can be a maxium of 512 - * characters long. + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. */ - metadata?: unknown | null; + metadata?: Shared.Metadata | null; /** * The name of the vector store. @@ -359,8 +452,8 @@ export interface VectorStoreListParams extends CursorPageParams { /** * A cursor for use in pagination. `before` is an object ID that defines your place * in the list. For instance, if you make a list request and receive 100 objects, - * ending with obj_foo, your subsequent call can include before=obj_foo in order to - * fetch the previous page of the list. + * starting with obj_foo, your subsequent call can include before=obj_foo in order + * to fetch the previous page of the list. */ before?: string; @@ -371,28 +464,88 @@ order?: 'asc' | 'desc'; } -export namespace VectorStores { - export import AutoFileChunkingStrategyParam = VectorStoresAPI.AutoFileChunkingStrategyParam; - export import FileChunkingStrategy = VectorStoresAPI.FileChunkingStrategy; - export import FileChunkingStrategyParam = VectorStoresAPI.FileChunkingStrategyParam; - export import OtherFileChunkingStrategyObject = VectorStoresAPI.OtherFileChunkingStrategyObject; - export import StaticFileChunkingStrategy = VectorStoresAPI.StaticFileChunkingStrategy; - export import StaticFileChunkingStrategyObject = VectorStoresAPI.StaticFileChunkingStrategyObject; - export import StaticFileChunkingStrategyParam = VectorStoresAPI.StaticFileChunkingStrategyParam; - export import VectorStore = VectorStoresAPI.VectorStore; - export import VectorStoreDeleted = VectorStoresAPI.VectorStoreDeleted; - export import VectorStoresPage = VectorStoresAPI.VectorStoresPage; - export import VectorStoreCreateParams = VectorStoresAPI.VectorStoreCreateParams; - export import VectorStoreUpdateParams = VectorStoresAPI.VectorStoreUpdateParams; - export import VectorStoreListParams = VectorStoresAPI.VectorStoreListParams; - export import Files = FilesAPI.Files; - export import VectorStoreFile = FilesAPI.VectorStoreFile; - export import VectorStoreFileDeleted = FilesAPI.VectorStoreFileDeleted; - export import VectorStoreFilesPage = FilesAPI.VectorStoreFilesPage; - export import FileCreateParams = FilesAPI.FileCreateParams; - export import FileListParams = FilesAPI.FileListParams; - export import FileBatches = FileBatchesAPI.FileBatches; - export import VectorStoreFileBatch = FileBatchesAPI.VectorStoreFileBatch; - export import FileBatchCreateParams = FileBatchesAPI.FileBatchCreateParams; - export import FileBatchListFilesParams = FileBatchesAPI.FileBatchListFilesParams; +export interface VectorStoreSearchParams { + /** + * A query string for a search + */ + query: string | Array<string>; + + /** + * A filter to apply based on file attributes. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. + */ + max_num_results?: number; + + /** + * Ranking options for search.
+ */ + ranking_options?: VectorStoreSearchParams.RankingOptions; + + /** + * Whether to rewrite the natural language query for vector search. + */ + rewrite_query?: boolean; +} + +export namespace VectorStoreSearchParams { + /** + * Ranking options for search. + */ + export interface RankingOptions { + ranker?: 'auto' | 'default-2024-11-15'; + + score_threshold?: number; + } +} + +VectorStores.VectorStoresPage = VectorStoresPage; +VectorStores.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; +VectorStores.Files = Files; +VectorStores.VectorStoreFilesPage = VectorStoreFilesPage; +VectorStores.FileContentResponsesPage = FileContentResponsesPage; +VectorStores.FileBatches = FileBatches; + +export declare namespace VectorStores { + export { + type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, + type FileChunkingStrategy as FileChunkingStrategy, + type FileChunkingStrategyParam as FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy as StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, + type VectorStore as VectorStore, + type VectorStoreDeleted as VectorStoreDeleted, + type VectorStoreSearchResponse as VectorStoreSearchResponse, + VectorStoresPage as VectorStoresPage, + VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage, + type VectorStoreCreateParams as VectorStoreCreateParams, + type VectorStoreUpdateParams as VectorStoreUpdateParams, + type VectorStoreListParams as VectorStoreListParams, + type VectorStoreSearchParams as VectorStoreSearchParams, + }; + + export { + Files as Files, + type VectorStoreFile as VectorStoreFile, + type VectorStoreFileDeleted as VectorStoreFileDeleted, + type FileContentResponse as FileContentResponse, + VectorStoreFilesPage as VectorStoreFilesPage, + FileContentResponsesPage as FileContentResponsesPage, + type FileCreateParams as FileCreateParams, + type FileUpdateParams as FileUpdateParams, + type FileListParams as FileListParams, + }; + + export { + FileBatches as FileBatches, + type VectorStoreFileBatch as VectorStoreFileBatch, + type FileBatchCreateParams as FileBatchCreateParams, + type FileBatchListFilesParams as FileBatchListFilesParams, + }; } diff --git a/src/streaming.ts b/src/streaming.ts index 597ee89fa..ee25daca6 100644 --- a/src/streaming.ts +++ b/src/streaming.ts @@ -1,8 +1,10 @@ import { ReadableStream, type Response } from './_shims/index'; import { OpenAIError } from './error'; -import { LineDecoder } from './internal/decoders/line'; +import { findDoubleNewlineIndex, LineDecoder } from './internal/decoders/line'; +import { ReadableStreamToAsyncIterable } from './internal/stream-utils'; -import { APIError } from 'openai/error'; +import { createResponseHeaders } from './core'; +import { APIError } from './error'; type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined; @@ -22,7 +24,7 @@ export class Stream<Item> implements AsyncIterable<Item> { this.controller = controller; } - static fromSSEResponse<Item>(response: Response, controller: AbortController) { + static fromSSEResponse<Item>(response: Response, controller: AbortController): Stream<Item> { let consumed = false; async function* iterator(): AsyncIterator<Item, any, undefined> { @@ -40,7 +42,11 @@ export class Stream<Item> implements AsyncIterable<Item> { continue; } - if (sse.event === null) { + if ( + sse.event === null || + sse.event.startsWith('response.') || + 
sse.event.startsWith('transcript.') + ) { let data; try { @@ -52,7 +58,7 @@ } if (data && data.error) { - throw new APIError(undefined, data.error, undefined, undefined); + throw new APIError(undefined, data.error, undefined, createResponseHeaders(response.headers)); } yield data; @@ -90,13 +96,13 @@ * Generates a Stream from a newline-separated ReadableStream * where each item is a JSON value. */ - static fromReadableStream<Item>(readableStream: ReadableStream, controller: AbortController) { + static fromReadableStream<Item>(readableStream: ReadableStream, controller: AbortController): Stream<Item> { let consumed = false; async function* iterLines(): AsyncGenerator<string, void, unknown> { const lineDecoder = new LineDecoder(); - const iter = readableStreamAsyncIterable(readableStream); + const iter = ReadableStreamToAsyncIterable(readableStream); for await (const chunk of iter) { for (const line of lineDecoder.decode(chunk)) { yield line; @@ -210,7 +216,7 @@ export async function* _iterSSEMessages( const sseDecoder = new SSEDecoder(); const lineDecoder = new LineDecoder(); - const iter = readableStreamAsyncIterable(response.body); + const iter = ReadableStreamToAsyncIterable(response.body); for await (const sseChunk of iterSSEChunks(iter)) { for (const line of lineDecoder.decode(sseChunk)) { const sse = sseDecoder.decode(line); @@ -258,37 +264,6 @@ async function* iterSSEChunks(iterator: AsyncIterableIterator<Bytes>): AsyncGene } } -function findDoubleNewlineIndex(buffer: Uint8Array): number { - // This function searches the buffer for the end patterns (\r\r, \n\n, \r\n\r\n) - // and returns the index right after the first occurrence of any pattern, - // or -1 if none of the patterns are found. - const newline = 0x0a; // \n - const carriage = 0x0d; // \r - - for (let i = 0; i < buffer.length - 2; i++) { - if (buffer[i] === newline && buffer[i + 1] === newline) { - // \n\n - return i + 2; - } - if (buffer[i] === carriage && buffer[i + 1] === carriage) { - // \r\r - return i + 2; - } - if ( - buffer[i] === carriage && - buffer[i + 1] === newline && - i + 3 < buffer.length && - buffer[i + 2] === carriage && - buffer[i + 3] === newline - ) { - // \r\n\r\n - return i + 4; - } - } - - return -1; -} - class SSEDecoder { private data: string[]; private event: string | null; @@ -344,17 +319,6 @@ class SSEDecoder { } } -/** This is an internal helper function that's just used for testing */ -export function _decodeChunks(chunks: string[]): string[] { - const decoder = new LineDecoder(); - const lines: string[] = []; - for (const chunk of chunks) { - lines.push(...decoder.decode(chunk)); - } - - return lines; -} - function partition(str: string, delimiter: string): [string, string, string] { const index = str.indexOf(delimiter); if (index !== -1) { @@ -363,36 +327,3 @@ function partition(str: string, delimiter: string): [string, string, string] { return [str, '', '']; } - -/** - * Most browsers don't yet have async iterable support for ReadableStream, - * and Node has a very different way of reading bytes from its "ReadableStream".
- * - * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 - */ -export function readableStreamAsyncIterable(stream: any): AsyncIterableIterator<Bytes> { - if (stream[Symbol.asyncIterator]) return stream; - - const reader = stream.getReader(); - return { - async next() { - try { - const result = await reader.read(); - if (result?.done) reader.releaseLock(); // release lock when stream becomes closed - return result; - } catch (e) { - reader.releaseLock(); // release lock when stream becomes errored - throw e; - } - }, - async return() { - const cancelPromise = reader.cancel(); - reader.releaseLock(); - await cancelPromise; - return { done: true, value: undefined }; - }, - [Symbol.asyncIterator]() { - return this; - }, - }; -} diff --git a/src/version.ts b/src/version.ts index 174c31111..62b43ffce 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const VERSION = '4.67.3'; // x-release-please-version +export const VERSION = '4.100.0'; // x-release-please-version diff --git a/tests/api-resources/audio/speech.test.ts b/tests/api-resources/audio/speech.test.ts index 904d75e5d..191c6a313 100644 --- a/tests/api-resources/audio/speech.test.ts +++ b/tests/api-resources/audio/speech.test.ts @@ -13,7 +13,8 @@ describe('resource speech', () => { const response = await client.audio.speech.create({ input: 'input', model: 'string', - voice: 'alloy', + voice: 'ash', + instructions: 'instructions', response_format: 'mp3', speed: 0.25, }); diff --git a/tests/api-resources/audio/transcriptions.test.ts b/tests/api-resources/audio/transcriptions.test.ts index ef2797911..ad76808d0 100644 --- a/tests/api-resources/audio/transcriptions.test.ts +++ b/tests/api-resources/audio/transcriptions.test.ts @@ -12,7 +12,7 @@ describe('resource transcriptions', () => { test('create: only required params', async () => { const responsePromise = client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'whisper-1', + model: 'gpt-4o-transcribe', }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -26,12 +26,15 @@ describe('resource transcriptions', () => { test('create: required and optional params', async () => { const response = await client.audio.transcriptions.create({ file: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'whisper-1', + model: 'gpt-4o-transcribe', + chunking_strategy: 'auto', + include: ['logprobs'], language: 'language', prompt: 'prompt', response_format: 'json', + stream: false, temperature: 0, - timestamp_granularities: ['word', 'segment'], + timestamp_granularities: ['word'], }); }); }); diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts index 96e200fb9..7c7397d06 100644 --- a/tests/api-resources/batches.test.ts +++ b/tests/api-resources/batches.test.ts @@ -12,7 +12,7 @@ describe('resource batches', () => { test('create: only required params', async () => { const responsePromise = client.batches.create({ completion_window: '24h', - endpoint: '/v1/chat/completions', + endpoint: '/v1/responses', input_file_id: 'input_file_id', }); const rawResponse = await responsePromise.asResponse(); @@ -27,7 +27,7 @@ describe('resource batches', () => { test('create: required and optional params', async () => { const response = await client.batches.create({ completion_window: '24h', - endpoint: '/v1/chat/completions', + endpoint: '/v1/responses', input_file_id:
'input_file_id', metadata: { foo: 'string' }, }); diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts index fdc325254..16bc9f942 100644 --- a/tests/api-resources/beta/assistants.test.ts +++ b/tests/api-resources/beta/assistants.test.ts @@ -25,20 +25,21 @@ describe('resource assistants', () => { model: 'gpt-4o', description: 'description', instructions: 'instructions', - metadata: {}, + metadata: { foo: 'string' }, name: 'name', + reasoning_effort: 'low', response_format: 'auto', temperature: 1, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], vector_stores: [ - { chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} }, + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, ], }, }, - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, }); }); diff --git a/tests/api-resources/beta/realtime/sessions.test.ts b/tests/api-resources/beta/realtime/sessions.test.ts new file mode 100644 index 000000000..dbb92ead3 --- /dev/null +++ b/tests/api-resources/beta/realtime/sessions.test.ts @@ -0,0 +1,22 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', +}); + +describe('resource sessions', () => { + test('create', async () => { + const responsePromise = client.beta.realtime.sessions.create({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); +}); diff --git a/tests/api-resources/beta/realtime/transcription-sessions.test.ts b/tests/api-resources/beta/realtime/transcription-sessions.test.ts new file mode 100644 index 000000000..d52ce2403 --- /dev/null +++ b/tests/api-resources/beta/realtime/transcription-sessions.test.ts @@ -0,0 +1,22 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ??
'http://127.0.0.1:4010', +}); + +describe('resource transcriptionSessions', () => { + test('create', async () => { + const responsePromise = client.beta.realtime.transcriptionSessions.create({}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); +}); diff --git a/tests/api-resources/beta/threads/messages.test.ts b/tests/api-resources/beta/threads/messages.test.ts index bfbcab1cb..e125edd84 100644 --- a/tests/api-resources/beta/threads/messages.test.ts +++ b/tests/api-resources/beta/threads/messages.test.ts @@ -27,21 +27,8 @@ describe('resource messages', () => { const response = await client.beta.threads.messages.create('thread_id', { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - { - file_id: 'file_id', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - { - file_id: 'file_id', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], - }, - ], - metadata: {}, + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], + metadata: { foo: 'string' }, }); }); diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts index 352d775c0..4b2b8030b 100644 --- a/tests/api-resources/beta/threads/runs/runs.test.ts +++ b/tests/api-resources/beta/threads/runs/runs.test.ts @@ -29,108 +29,22 @@ describe('resource runs', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], + metadata: { foo: 'string' }, }, ], instructions: 'instructions',
max_completion_tokens: 256, max_prompt_tokens: 256, - metadata: {}, - model: 'gpt-4o', + metadata: { foo: 'string' }, + model: 'string', parallel_tool_calls: true, + reasoning_effort: 'low', response_format: 'auto', stream: false, temperature: 1, tool_choice: 'none', - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, truncation_strategy: { type: 'auto', last_messages: 1 }, }); @@ -214,7 +128,7 @@ describe('resource runs', () => { test('submitToolOutputs: only required params', async () => { const responsePromise = client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { - tool_outputs: [{}, {}, {}], + tool_outputs: [{}], }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); @@ -227,11 +141,7 @@ describe('resource runs', () => { test('submitToolOutputs: required and optional params', async () => { const response = await client.beta.threads.runs.submitToolOutputs('thread_id', 'run_id', { - tool_outputs: [ - { output: 'output', tool_call_id: 'tool_call_id' }, - { output: 'output', tool_call_id: 'tool_call_id' }, - { output: 'output', tool_call_id: 'tool_call_id' }, - ], + tool_outputs: [{ output: 'output', tool_call_id: 'tool_call_id' }], stream: false, }); }); diff --git a/tests/api-resources/beta/threads/threads.test.ts b/tests/api-resources/beta/threads/threads.test.ts index dc0a94a7d..bc92a0c8a 100644 --- a/tests/api-resources/beta/threads/threads.test.ts +++ b/tests/api-resources/beta/threads/threads.test.ts @@ -36,108 +36,17 @@ describe('resource threads', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], + metadata: { foo: 'string' }, }, ], - metadata: {}, + metadata: { foo: 'string' }, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], vector_stores: [ - { - 
chunking_strategy: { type: 'auto' }, - file_ids: ['string', 'string', 'string'], - metadata: {}, - }, + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, ], }, }, @@ -211,8 +120,8 @@ describe('resource threads', () => { instructions: 'instructions', max_completion_tokens: 256, max_prompt_tokens: 256, - metadata: {}, - model: 'gpt-4o', + metadata: { foo: 'string' }, + model: 'string', parallel_tool_calls: true, response_format: 'auto', stream: false, @@ -222,114 +131,27 @@ describe('resource threads', () => { { content: 'string', role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, - }, - { - content: 'string', - role: 'user', - attachments: [ - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - { - file_id: 'file_id', - tools: [ - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - { type: 'code_interpreter' }, - ], - }, - ], - metadata: {}, + attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }], + metadata: { foo: 'string' }, }, ], - metadata: {}, + metadata: { foo: 'string' }, tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'], vector_stores: [ - { chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} }, + { chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: { foo: 'string' } }, ], }, }, }, tool_choice: 'none', tool_resources: { - code_interpreter: { file_ids: ['string', 'string', 'string'] }, + code_interpreter: { file_ids: ['string'] }, file_search: { vector_store_ids: ['string'] }, }, - tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }], + tools: [{ type: 'code_interpreter' }], top_p: 1, truncation_strategy: { type: 'auto', last_messages: 1 }, }); diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts deleted file mode 100644 index 4f015b47e..000000000 --- a/tests/api-resources/chat/completions.test.ts +++ /dev/null @@ -1,69 +0,0 @@ -// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -import OpenAI from 'openai'; -import { Response } from 'node-fetch'; - -const client = new OpenAI({ - apiKey: 'My API Key', - baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', -}); - -describe('resource completions', () => { - test('create: only required params', async () => { - const responsePromise = client.chat.completions.create({ - messages: [{ content: 'string', role: 'system' }], - model: 'gpt-4o', - }); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('create: required and optional params', async () => { - const response = await client.chat.completions.create({ - messages: [{ content: 'string', role: 'system', name: 'name' }], - model: 'gpt-4o', - frequency_penalty: -2, - function_call: 'none', - functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], - logit_bias: { foo: 0 }, - logprobs: true, - max_completion_tokens: 0, - max_tokens: 0, - metadata: { foo: 'string' }, - n: 1, - parallel_tool_calls: true, - presence_penalty: -2, - response_format: { type: 'text' }, - seed: -9007199254740991, - service_tier: 'auto', - stop: 'string', - store: true, - stream: false, - stream_options: { include_usage: true }, - temperature: 1, - tool_choice: 'none', - tools: [ - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, - { - function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, - type: 'function', - }, - ], - top_logprobs: 0, - top_p: 1, - user: 'user-1234', - }); - }); -}); diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts new file mode 100644 index 000000000..60c23591a --- /dev/null +++ b/tests/api-resources/chat/completions/completions.test.ts @@ -0,0 +1,151 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ??
'http://127.0.0.1:4010', +}); + +describe('resource completions', () => { + test('create: only required params', async () => { + const responsePromise = client.chat.completions.create({ + messages: [{ content: 'string', role: 'developer' }], + model: 'gpt-4o', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.chat.completions.create({ + messages: [{ content: 'string', role: 'developer', name: 'name' }], + model: 'gpt-4o', + audio: { format: 'wav', voice: 'ash' }, + frequency_penalty: -2, + function_call: 'none', + functions: [{ name: 'name', description: 'description', parameters: { foo: 'bar' } }], + logit_bias: { foo: 0 }, + logprobs: true, + max_completion_tokens: 0, + max_tokens: 0, + metadata: { foo: 'string' }, + modalities: ['text'], + n: 1, + parallel_tool_calls: true, + prediction: { content: 'string', type: 'content' }, + presence_penalty: -2, + reasoning_effort: 'low', + response_format: { type: 'text' }, + seed: -9007199254740991, + service_tier: 'auto', + stop: '\n', + store: true, + stream: false, + stream_options: { include_usage: true }, + temperature: 1, + tool_choice: 'none', + tools: [ + { + function: { name: 'name', description: 'description', parameters: { foo: 'bar' }, strict: true }, + type: 'function', + }, + ], + top_logprobs: 0, + top_p: 1, + user: 'user-1234', + web_search_options: { + search_context_size: 'low', + user_location: { + approximate: { city: 'city', country: 'country', region: 'region', timezone: 'timezone' }, + type: 'approximate', + }, + }, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.chat.completions.retrieve('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.retrieve('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.chat.completions.update('completion_id', { metadata: { foo: 'string' } }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.chat.completions.update('completion_id', { metadata: { foo: 'string' } }); + }); + + test('list', async () => { + const
responsePromise = client.chat.completions.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.chat.completions.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.list( + { after: 'after', limit: 0, metadata: { foo: 'string' }, model: 'model', order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.chat.completions.del('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.del('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/chat/completions/messages.test.ts b/tests/api-resources/chat/completions/messages.test.ts new file mode 100644 index 000000000..664106cb9 --- /dev/null +++ b/tests/api-resources/chat/completions/messages.test.ts @@ -0,0 +1,40 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', +}); + +describe('resource messages', () => { + test('list', async () => { + const responsePromise = client.chat.completions.messages.list('completion_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.messages.list('completion_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.chat.completions.messages.list( + 'completion_id', + { after: 'after', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts index 82322dc3a..c98501a87 100644 --- a/tests/api-resources/completions.test.ts +++ b/tests/api-resources/completions.test.ts @@ -32,7 +32,7 @@ describe('resource completions', () => { max_tokens: 16, n: 1, presence_penalty: -2, - seed: -9007199254740991, + seed: 0, stop: '\n', stream: false, stream_options: { include_usage: true }, diff --git a/tests/api-resources/embeddings-base64-response.json b/tests/api-resources/embeddings-base64-response.json new file mode 100644 index 000000000..9b0f7629c --- /dev/null +++ b/tests/api-resources/embeddings-base64-response.json @@ -0,0 +1 @@ +{"object":"list","data":[{"object":"embedding","index":0,"embedding":"A1fLvaC4Bb0QB7w8yEvrPOm9Xj2r0yA8EW4sPRq75j3Fbiq81/chPumAGb0afqG8R6AFvpzsQT35SPO7Hi39PEMAir1lf0A92McfvRoVlLxQv9o9tHqIvQYlrL0fwlK8sufPPYz2gjzH5Ho93GebvN+eCTxjRjW8PJRKvXMtFD4+n3C9ByMPO39Gkjs1Jm49A1fLPdNXpjv8RLm92McfveKpLz01VNO9SUIevhAHvD0flG09+9srvW5j7Txp8dY8LW4Ju08bJb1GdL29g+aNPWlLBD1p8dY8LkCkvfPLtjxcBj4+1/ehPebv/bz/Ifo8SqkOvREFHzyAr588HbUPPbFS+r00gri825WAPQlcGj1qHZ+8o8EOPo880Tn5dli9zRUSPc2APD0b5RG9mhxEvTyUSj3FQMU95u/9vE20tD3wwBC94NmxvXSUhL3Ofh8904WLPRbeJb2Paja8BClmvhwgOj2e6Ic9em0LPdj1BD3lSau7dJQEPJi107yB6kc97sTKO6lAaD2YDwE9YDuPPSFVC735dtg9SK1IOysJNrwtQkE8BmJxPb2ZXT0hVYs9g+YNvLfuuz2nyhe9z7nHN5UVWDxea5E77F1avTIbyL256oG9ft+hPVWJAbwNoug82TCtvUrm072wgN86JPWGO3TRyTwOY4a8xJwPvkx5DL1f1B68RwkTvja7Q72BrQI9Pfs6PTdfeb3RxG09jJxVvfl22D3eCbQ9FbR6vTPtYrn0mzS+kqGkPDxXhbwyG8i98M9wveayuL1EpL88lNqvve3yL70RQmQ7VcZGPaPBjr1wyEA9fKaWOskMibwNomi8J9Rku9EeGz016Si8O1mivQ38lb0EgxO88P1VvcilmLuNA0a9lj8DvHCceD3lSSs9uFWsve6HBT6XEZ68ShS5PFJSE70dTIK86OvDvSNgsbzS8DU8bPz8PAuVpTxKQIE9/NmOPBhFFj7LsL67PJRKvIxu8LwSqVS8D8yTPSOOlj1g0gG8A+69vYz2AjxPhLK80fLSPbrL/LztWz09LAcZvqfKF73B/JO8lnzIvCk5OLxwMU69dmQCvQtp3bs6hwe9WZKKume4S7x3CLg9zK4hPLsjDT16P6a7MbTXPRp+IT0dtQ89GayGvcngwD2F8bO70R4bu8tFlDxcBr67xAWdvdnWfzzQTIC9zn6fPYSKwz3alx28h8GxPW74wj3eNxk+xUBFvIpjyj0WdRi9AkoIPXhvqLugx+U8F0ezvUlCHjx3NAC9uvlhPEOmXD36oAM9D56uvddgrz2giiC9GhWUvHrWGLv0yRk8fOPbvMc+KLs7//S8v5UjPJUV2D0KLjW6YKa5PDciNDuJznQ9USZLPQ=="}],"model":"text-embedding-3-large","usage":{"prompt_tokens":1,"total_tokens":1}}
\ No newline at end of file diff --git a/tests/api-resources/embeddings-float-response.json b/tests/api-resources/embeddings-float-response.json new file mode 100644 index 000000000..9b5b788e2 --- /dev/null +++ b/tests/api-resources/embeddings-float-response.json @@ -0,0 +1 @@ +{"object":"list","data":[{"object":"embedding","index":0,"embedding":[-0.099287055,-0.032646775,0.022952586,0.028722659,0.05438033,0.009816091,0.042097155,0.112661555,-0.010402386,0.158172,-0.037476454,-0.01971345,-0.13049422,0.04734479,-0.0074244705,0.030905303,-0.06738331,0.046996493,-0.039008945,-0.018076468,0.10681021,-0.06664029,-0.08405499,-0.012863665,0.10151614,0.015986703,0.061253335,-0.018970422,0.008399694,-0.011064145,-0.049457774,0.14470463,-0.058745615,0.0021840946,0.00446397,0.058141906,0.099287055,0.0050763874,-0.09046361,-0.039008945,0.042886622,-0.103187956,-0.15454973,0.091810346,0.058002587,-0.041957837,0.028978076,0.02623816,-0.002097021,-0.040309247,-0.09250693,0.06928732,0.03229848,0.02623816,-0.08020054,0.022314047,0.18557113,0.079086,-0.030998182,0.030533789,-0.034829415,0.009705798,0.019492865,0.035084832,-0.122228034,-0.022523023,0.06278583,0.037685428,-0.019423205,0.13941054,0.00039908706,-0.052847836,0.035665322,0.04602127,-0.035618883,-0.04787884,0.049457774,0.096314944,-0.030998182,0.08823452,-0.03534025,-0.086841345,-0.06473628,0.03893929,0.06812634,-0.040495,-0.011133804,-0.22476584,0.045440778,0.06636165,0.03403995,0.032461017,-0.005227315,0.008092035,-0.025843427,0.048807625,0.0061880266,0.05670229,0.031509012,0.06993747,-0.034016732,0.10569567,0.0030620862,-0.011110584,0.011795563,0.058931373,0.054101694,0.068033464,-0.008660915,0.091763906,-0.0370585,0.000023809172,0.013188739,0.004437848,-0.053312227,-0.09770812,-0.06343598,0.07903956,-0.007906278,0.028397584,-0.084565826,-0.103466585,0.0017051902,0.0041185785,0.024636008,-0.016404655,-0.14024645,-0.034295365,-0.009694188,-0.14359008,-0.04778596,0.031903747,0.045649756,-0.06088182,0.058049027,-0.052151248,0.10569567,0.087909445,-0.061206896,-0.00021641403,-0.17637616,0.020096574,-0.016276948,-0.09770812,-0.058792055,-0.09018497,0.023393758,-0.08586612,-0.04295628,0.0034829418,0.048528988,-0.06970527,0.047066152,0.0011493708,-0.01672973,-0.014198792,-0.0034916492,0.037871186,-0.010309507,-0.079271756,-0.073234655,-0.0090034045,-0.052244127,-0.0046584345,-0.04834323,-0.008010766,0.060696065,0.04181852,-0.08414787,0.13040134,-0.019295497,0.022592682,-0.03596718,-0.015905434,-0.0956648,-0.021652287,0.011104779,0.030882083,0.02021267,0.0631109,0.017437927,0.14674795,-0.005819415,-0.012364443,-0.029349588,-0.012979763,0.072166555,0.07351329,-0.007923692,-0.09273913,0.007993352,-0.021791605,0.1030022,-0.030858863,0.046230245,-0.14944142,-0.0370585,-0.018064858,-0.02447347,-0.011244097,-0.050340116,-0.03183409,-0.006756907,-0.033087946,-0.001057218,-0.012434102,0.089859895,0.009868335,0.034457903,-0.005073485,0.10532416,0.0394269,0.035084832,-0.06575794,0.09417874,-0.005491438,-0.002366949,0.018099686,-0.005799098,-0.07667115,0.0156151885,-0.06264651,0.07787858,0.09547904,-0.009618724,0.086794905,0.095200405,0.14962718,-0.012039368,0.09882267,-0.037221037,0.033273704,-0.0051402412,0.02804929,-0.08753794,0.009659358,-0.031300034,0.01379245,0.053869497,0.03213594,-0.08526241,0.085633926,-0.039194703,-0.018076468,-0.0023321197,0.009386528,-0.026841871,-0.0025672184,-0.02990686,0.009984433,0.105509914,-0.00069114624,0.022662342,0.0027486214,0.05976728,0.04959709]}],"model":"text-embedding-3-large","usage":{"prompt_tokens":1,"total_tokens":1}
} \ No newline at end of file diff --git a/tests/api-resources/embeddings.test.ts b/tests/api-resources/embeddings.test.ts index 46dd1b2a3..629265643 100644 --- a/tests/api-resources/embeddings.test.ts +++ b/tests/api-resources/embeddings.test.ts @@ -2,6 +2,9 @@ import OpenAI from 'openai'; import { Response } from 'node-fetch'; +import { mockFetch } from '../utils/mock-fetch'; +import fs from 'fs/promises'; +import Path from 'path'; const client = new OpenAI({ apiKey: 'My API Key', @@ -32,4 +35,73 @@ describe('resource embeddings', () => { user: 'user-1234', }); }); + + test('create: encoding_format=default should create float32 embeddings', async () => { + const client = makeClient(); + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + }); + + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(response.data?.at(0)?.embedding.at(0)).toBe(-0.09928705543279648); + }); + + test('create: encoding_format=float should create float32 embeddings', async () => { + const client = makeClient(); + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + encoding_format: 'float', + }); + + expect(response.data?.at(0)?.embedding).toBeInstanceOf(Array); + expect(response.data?.at(0)?.embedding.at(0)).toBe(-0.099287055); + }); + + test('create: encoding_format=base64 should return base64 embeddings', async () => { + const client = makeClient(); + const response = await client.embeddings.create({ + input: 'The quick brown fox jumped over the lazy dog', + model: 'text-embedding-3-small', + encoding_format: 'base64', + }); + + expect(typeof response.data?.at(0)?.embedding).toBe('string'); + }); }); + +function makeClient(): OpenAI { + const { fetch, handleRequest } = mockFetch(); + + handleRequest(async (_, init) => { + const format = (JSON.parse(init!.body as string) as OpenAI.EmbeddingCreateParams).encoding_format; + return new Response( + await fs.readFile( + Path.join( + __dirname, + + // these responses were taken from the live API with: + // + // model: 'text-embedding-3-large', + // input: 'h', + // dimensions: 256, + + format === 'base64' ? 'embeddings-base64-response.json' : 'embeddings-float-response.json', + ), + ), + { + status: 200, + headers: { + 'Content-Type': 'application/json', + }, + }, + ); + }); + + return new OpenAI({ + fetch, + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', + }); +} diff --git a/tests/api-resources/evals/evals.test.ts b/tests/api-resources/evals/evals.test.ts new file mode 100644 index 000000000..45d1c4f9b --- /dev/null +++ b/tests/api-resources/evals/evals.test.ts @@ -0,0 +1,127 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ??
'http://127.0.0.1:4010/', +}); + +describe('resource evals', () => { + test('create: only required params', async () => { + const responsePromise = client.evals.create({ + data_source_config: { item_schema: { foo: 'bar' }, type: 'custom' }, + testing_criteria: [ + { + input: [{ content: 'content', role: 'role' }], + labels: ['string'], + model: 'model', + name: 'name', + passing_labels: ['string'], + type: 'label_model', + }, + ], + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.evals.create({ + data_source_config: { item_schema: { foo: 'bar' }, type: 'custom', include_sample_schema: true }, + testing_criteria: [ + { + input: [{ content: 'content', role: 'role' }], + labels: ['string'], + model: 'model', + name: 'name', + passing_labels: ['string'], + type: 'label_model', + }, + ], + metadata: { foo: 'string' }, + name: 'name', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.evals.retrieve('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.retrieve('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('update', async () => { + const responsePromise = client.evals.update('eval_id', {}); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list', async () => { + const responsePromise = client.evals.list(); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + 
client.evals.list( + { after: 'after', limit: 0, order: 'asc', order_by: 'created_at' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.evals.del('eval_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect(client.evals.del('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); + }); +}); diff --git a/tests/api-resources/evals/runs/output-items.test.ts b/tests/api-resources/evals/runs/output-items.test.ts new file mode 100644 index 000000000..ff075b404 --- /dev/null +++ b/tests/api-resources/evals/runs/output-items.test.ts @@ -0,0 +1,61 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', +}); + +describe('resource outputItems', () => { + test('retrieve', async () => { + const responsePromise = client.evals.runs.outputItems.retrieve('eval_id', 'run_id', 'output_item_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.outputItems.retrieve('eval_id', 'run_id', 'output_item_id', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list', async () => { + const responsePromise = client.evals.runs.outputItems.list('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.outputItems.list('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await 
expect( + client.evals.runs.outputItems.list( + 'eval_id', + 'run_id', + { after: 'after', limit: 0, order: 'asc', status: 'fail' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/files.test.ts b/tests/api-resources/evals/runs/runs.test.ts similarity index 62% rename from tests/api-resources/beta/vector-stores/files.test.ts rename to tests/api-resources/evals/runs/runs.test.ts index 7c14d4de3..786df0ba1 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/evals/runs/runs.test.ts @@ -8,9 +8,11 @@ const client = new OpenAI({ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010/', }); -describe('resource files', () => { +describe('resource runs', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const responsePromise = client.evals.runs.create('eval_id', { + data_source: { source: { content: [{ item: { foo: 'bar' } }], type: 'file_content' }, type: 'jsonl' }, + }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,14 +23,18 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.files.create('vs_abc123', { - file_id: 'file_id', - chunking_strategy: { type: 'auto' }, + const response = await client.evals.runs.create('eval_id', { + data_source: { + source: { content: [{ item: { foo: 'bar' }, sample: { foo: 'bar' } }], type: 'file_content' }, + type: 'jsonl', + }, + metadata: { foo: 'string' }, + name: 'name', }); }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const responsePromise = client.evals.runs.retrieve('eval_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -41,14 +47,12 @@ describe('resource files', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { - path: '/_stainless_unknown_path', - }), + client.evals.runs.retrieve('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('list', async () => { - const responsePromise = client.beta.vectorStores.files.list('vector_store_id'); + const responsePromise = client.evals.runs.list('eval_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -60,24 +64,24 @@ describe('resource files', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.beta.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(OpenAI.NotFoundError); + await expect(client.evals.runs.list('eval_id', { path: '/_stainless_unknown_path' })).rejects.toThrow( + OpenAI.NotFoundError, + ); }); test('list: request 
options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.list( - 'vector_store_id', - { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, + client.evals.runs.list( + 'eval_id', + { after: 'after', limit: 0, order: 'asc', status: 'queued' }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = client.beta.vectorStores.files.del('vector_store_id', 'file_id'); + const responsePromise = client.evals.runs.del('eval_id', 'run_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -90,7 +94,25 @@ describe('resource files', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + client.evals.runs.del('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('cancel', async () => { + const responsePromise = client.evals.runs.cancel('eval_id', 'run_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('cancel: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.evals.runs.cancel('eval_id', 'run_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts index bbaa45a65..c907c4987 100644 --- a/tests/api-resources/files.test.ts +++ b/tests/api-resources/files.test.ts @@ -69,7 +69,10 @@ describe('resource files', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.files.list({ purpose: 'purpose' }, { path: '/_stainless_unknown_path' }), + client.files.list( + { after: 'after', limit: 0, order: 'asc', purpose: 'purpose' }, + { path: '/_stainless_unknown_path' }, + ), ).rejects.toThrow(OpenAI.NotFoundError); }); diff --git a/tests/api-resources/fine-tuning/alpha/graders.test.ts b/tests/api-resources/fine-tuning/alpha/graders.test.ts new file mode 100644 index 000000000..8e47a4c42 --- /dev/null +++ b/tests/api-resources/fine-tuning/alpha/graders.test.ts @@ -0,0 +1,53 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource graders', () => { + test('run: only required params', async () => { + const responsePromise = client.fineTuning.alpha.graders.run({ + grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' }, + model_sample: 'model_sample', + reference_answer: 'string', + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('run: required and optional params', async () => { + const response = await client.fineTuning.alpha.graders.run({ + grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' }, + model_sample: 'model_sample', + reference_answer: 'string', + }); + }); + + test('validate: only required params', async () => { + const responsePromise = client.fineTuning.alpha.graders.validate({ + grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('validate: required and optional params', async () => { + const response = await client.fineTuning.alpha.graders.validate({ + grader: { input: 'input', name: 'name', operation: 'eq', reference: 'reference', type: 'string_check' }, + }); + }); +}); diff --git a/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts new file mode 100644 index 000000000..1e4b40a94 --- /dev/null +++ b/tests/api-resources/fine-tuning/checkpoints/permissions.test.ts @@ -0,0 +1,88 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource permissions', () => { + test('create: only required params', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.create( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + { project_ids: ['string'] }, + ); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.fineTuning.checkpoints.permissions.create( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + { project_ids: ['string'] }, + ); + }); + + test('retrieve', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.retrieve('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('retrieve: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.retrieve( + 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', + { after: 'after', limit: 0, order: 'ascending', project_id: 'project_id' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.fineTuning.checkpoints.permissions.del( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', + ); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.checkpoints.permissions.del( + 'ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd', + 'cp_zc4Q7MP6XxulcVzj4MZdwsAB', + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/fine-tuning/jobs/jobs.test.ts b/tests/api-resources/fine-tuning/jobs/jobs.test.ts index 646c2f5cf..fe8c9efee 100644 --- a/tests/api-resources/fine-tuning/jobs/jobs.test.ts +++ 
b/tests/api-resources/fine-tuning/jobs/jobs.test.ts @@ -31,32 +31,41 @@ describe('resource jobs', () => { integrations: [ { type: 'wandb', - wandb: { - project: 'my-wandb-project', - entity: 'entity', - name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], - }, + wandb: { project: 'my-wandb-project', entity: 'entity', name: 'name', tags: ['custom-tag'] }, }, - { - type: 'wandb', - wandb: { - project: 'my-wandb-project', - entity: 'entity', - name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], + ], + method: { + type: 'supervised', + dpo: { + hyperparameters: { + batch_size: 'auto', + beta: 'auto', + learning_rate_multiplier: 'auto', + n_epochs: 'auto', }, }, - { - type: 'wandb', - wandb: { - project: 'my-wandb-project', - entity: 'entity', + reinforcement: { + grader: { + input: 'input', name: 'name', - tags: ['custom-tag', 'custom-tag', 'custom-tag'], + operation: 'eq', + reference: 'reference', + type: 'string_check', + }, + hyperparameters: { + batch_size: 'auto', + compute_multiplier: 'auto', + eval_interval: 'auto', + eval_samples: 'auto', + learning_rate_multiplier: 'auto', + n_epochs: 'auto', + reasoning_effort: 'default', }, }, - ], + supervised: { + hyperparameters: { batch_size: 'auto', learning_rate_multiplier: 'auto', n_epochs: 'auto' }, + }, + }, seed: 42, suffix: 'x', validation_file: 'file-abc123', @@ -152,4 +161,40 @@ describe('resource jobs', () => { ), ).rejects.toThrow(OpenAI.NotFoundError); }); + + test('pause', async () => { + const responsePromise = client.fineTuning.jobs.pause('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('pause: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.jobs.pause('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('resume', async () => { + const responsePromise = client.fineTuning.jobs.resume('ft-AF1WoRqd3aJAHsqc9NY7iL8F'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('resume: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.fineTuning.jobs.resume('ft-AF1WoRqd3aJAHsqc9NY7iL8F', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); }); diff --git a/tests/api-resources/images.test.ts b/tests/api-resources/images.test.ts index 88eb97a93..04fca0a2a 100644 --- a/tests/api-resources/images.test.ts +++ b/tests/api-resources/images.test.ts @@ -25,10 +25,10 @@ describe('resource images', () => { test('createVariation: required and optional params', async () => { const response = await 
client.images.createVariation({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'dall-e-2', + model: 'string', n: 1, response_format: 'url', - size: '256x256', + size: '1024x1024', user: 'user-1234', }); }); @@ -51,11 +51,13 @@ describe('resource images', () => { const response = await client.images.edit({ image: await toFile(Buffer.from('# my file contents'), 'README.md'), prompt: 'A cute baby sea otter wearing a beret', + background: 'transparent', mask: await toFile(Buffer.from('# my file contents'), 'README.md'), - model: 'dall-e-2', + model: 'string', n: 1, + quality: 'high', response_format: 'url', - size: '256x256', + size: '1024x1024', user: 'user-1234', }); }); @@ -74,11 +76,15 @@ describe('resource images', () => { test('generate: required and optional params', async () => { const response = await client.images.generate({ prompt: 'A cute baby sea otter', - model: 'dall-e-3', + background: 'transparent', + model: 'string', + moderation: 'low', n: 1, - quality: 'standard', + output_compression: 100, + output_format: 'png', + quality: 'medium', response_format: 'url', - size: '256x256', + size: '1024x1024', style: 'vivid', user: 'user-1234', }); diff --git a/tests/api-resources/moderations.test.ts b/tests/api-resources/moderations.test.ts index 64f9acf3c..107ce9974 100644 --- a/tests/api-resources/moderations.test.ts +++ b/tests/api-resources/moderations.test.ts @@ -21,9 +21,6 @@ describe('resource moderations', () => { }); test('create: required and optional params', async () => { - const response = await client.moderations.create({ - input: 'I want to kill them.', - model: 'omni-moderation-2024-09-26', - }); + const response = await client.moderations.create({ input: 'I want to kill them.', model: 'string' }); }); }); diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts new file mode 100644 index 000000000..25ab166c0 --- /dev/null +++ b/tests/api-resources/responses/input-items.test.ts @@ -0,0 +1,40 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource inputItems', () => { + test('list', async () => { + const responsePromise = client.responses.inputItems.list('response_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list('response_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list( + 'response_id', + { after: 'after', before: 'before', include: ['file_search_call.results'], limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts new file mode 100644 index 000000000..cf7e9cf3c --- /dev/null +++ b/tests/api-resources/responses/responses.test.ts @@ -0,0 +1,103 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource responses', () => { + test('create: only required params', async () => { + const responsePromise = client.responses.create({ input: 'string', model: 'gpt-4o' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.responses.create({ + input: 'string', + model: 'gpt-4o', + include: ['file_search_call.results'], + instructions: 'instructions', + max_output_tokens: 0, + metadata: { foo: 'string' }, + parallel_tool_calls: true, + previous_response_id: 'previous_response_id', + reasoning: { effort: 'low', generate_summary: 'auto', summary: 'auto' }, + service_tier: 'auto', + store: true, + stream: false, + temperature: 1, + text: { format: { type: 'text' } }, + tool_choice: 'none', + tools: [ + { + type: 'file_search', + vector_store_ids: ['string'], + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 0, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + }, + ], + top_p: 1, + truncation: 'auto', + user: 'user-1234', + }); + }); + + test('retrieve', async () => { + const responsePromise = client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d', { + path: '/_stainless_unknown_path', + }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('retrieve: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.retrieve( + 'resp_677efb5139a88190b512bc3fef8e535d', + { include: ['file_search_call.results'] }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.responses.del('resp_677efb5139a88190b512bc3fef8e535d'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.del('resp_677efb5139a88190b512bc3fef8e535d', { path: 
'/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/uploads/uploads.test.ts b/tests/api-resources/uploads/uploads.test.ts index e4e3c6d30..c9ea4ddd7 100644 --- a/tests/api-resources/uploads/uploads.test.ts +++ b/tests/api-resources/uploads/uploads.test.ts @@ -53,9 +53,7 @@ describe('resource uploads', () => { }); test('complete: only required params', async () => { - const responsePromise = client.uploads.complete('upload_abc123', { - part_ids: ['string', 'string', 'string'], - }); + const responsePromise = client.uploads.complete('upload_abc123', { part_ids: ['string'] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -66,9 +64,6 @@ describe('resource uploads', () => { }); test('complete: required and optional params', async () => { - const response = await client.uploads.complete('upload_abc123', { - part_ids: ['string', 'string', 'string'], - md5: 'md5', - }); + const response = await client.uploads.complete('upload_abc123', { part_ids: ['string'], md5: 'md5' }); }); }); diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts similarity index 81% rename from tests/api-resources/beta/vector-stores/file-batches.test.ts rename to tests/api-resources/vector-stores/file-batches.test.ts index b714049b4..c0447a838 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/vector-stores/file-batches.test.ts @@ -10,9 +10,7 @@ const client = new OpenAI({ describe('resource fileBatches', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.create('vs_abc123', { - file_ids: ['string'], - }); + const responsePromise = client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,14 +21,15 @@ describe('resource fileBatches', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.fileBatches.create('vs_abc123', { + const response = await client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'], + attributes: { foo: 'string' }, chunking_strategy: { type: 'auto' }, }); }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); + const responsePromise = client.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,14 +42,14 @@ describe('resource fileBatches', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { + client.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); + const responsePromise = 
client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -63,14 +62,14 @@ describe('resource fileBatches', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('listFiles', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); + const responsePromise = client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -83,7 +82,7 @@ describe('resource fileBatches', () => { test('listFiles: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); @@ -92,7 +91,7 @@ describe('resource fileBatches', () => { test('listFiles: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.listFiles( + client.vectorStores.fileBatches.listFiles( 'vector_store_id', 'batch_id', { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, diff --git a/tests/api-resources/vector-stores/files.test.ts b/tests/api-resources/vector-stores/files.test.ts new file mode 100644 index 000000000..86a8f9bb4 --- /dev/null +++ b/tests/api-resources/vector-stores/files.test.ts @@ -0,0 +1,132 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', +}); + +describe('resource files', () => { + test('create: only required params', async () => { + const responsePromise = client.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.vectorStores.files.create('vs_abc123', { + file_id: 'file_id', + attributes: { foo: 'string' }, + chunking_strategy: { type: 'auto' }, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.vectorStores.files.update('vs_abc123', 'file-abc123', { + attributes: { foo: 'string' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.vectorStores.files.update('vs_abc123', 'file-abc123', { + attributes: { foo: 'string' }, + }); + }); + + test('list', async () => { + const responsePromise = client.vectorStores.files.list('vector_store_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list( + 
'vector_store_id', + { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.vectorStores.files.del('vector_store_id', 'file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('content', async () => { + const responsePromise = client.vectorStores.files.content('vs_abc123', 'file-abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('content: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.content('vs_abc123', 'file-abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/vector-stores/vector-stores.test.ts similarity index 71% rename from tests/api-resources/beta/vector-stores/vector-stores.test.ts rename to tests/api-resources/vector-stores/vector-stores.test.ts index 806098de8..465904a00 100644 --- a/tests/api-resources/beta/vector-stores/vector-stores.test.ts +++ b/tests/api-resources/vector-stores/vector-stores.test.ts @@ -10,7 +10,7 @@ const client = new OpenAI({ describe('resource vectorStores', () => { test('create', async () => { - const responsePromise = client.beta.vectorStores.create({}); + const responsePromise = client.vectorStores.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource vectorStores', () => { }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.retrieve('vector_store_id'); + const responsePromise = client.vectorStores.retrieve('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -34,12 +34,12 @@ describe('resource vectorStores', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), + 
client.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('update', async () => { - const responsePromise = client.beta.vectorStores.update('vector_store_id', {}); + const responsePromise = client.vectorStores.update('vector_store_id', {}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -50,7 +50,7 @@ describe('resource vectorStores', () => { }); test('list', async () => { - const responsePromise = client.beta.vectorStores.list(); + const responsePromise = client.vectorStores.list(); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -62,7 +62,7 @@ describe('resource vectorStores', () => { test('list: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect(client.beta.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( + await expect(client.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow( OpenAI.NotFoundError, ); }); @@ -70,7 +70,7 @@ describe('resource vectorStores', () => { test('list: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.list( + client.vectorStores.list( { after: 'after', before: 'before', limit: 0, order: 'asc' }, { path: '/_stainless_unknown_path' }, ), @@ -78,7 +78,7 @@ describe('resource vectorStores', () => { }); test('del', async () => { - const responsePromise = client.beta.vectorStores.del('vector_store_id'); + const responsePromise = client.vectorStores.del('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -91,7 +91,28 @@ describe('resource vectorStores', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), + client.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); + + test('search: only required params', async () => { + const responsePromise = client.vectorStores.search('vs_abc123', { query: 'string' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('search: required and optional params', async () => { + const response = await client.vectorStores.search('vs_abc123', { + query: 'string', + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 1, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + rewrite_query: true, + }); + }); }); diff --git a/tests/helpers/zod.test.ts b/tests/helpers/zod.test.ts index 493b4c0c8..02d8a7a8f 100644 --- 
a/tests/helpers/zod.test.ts +++ b/tests/helpers/zod.test.ts @@ -278,4 +278,56 @@ describe('zodResponseFormat', () => { } `); }); + + it('warns on optional fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + required: z.string(), + optional: z.string().optional(), + optional_and_nullable: z.string().optional().nullable(), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledWith( + 'Zod field at `#/definitions/schema/properties/optional` uses `.optional()` without `.nullable()` which is not supported by the API. See: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#all-fields-must-be-required\nThis will become an error in a future version of the SDK.', + ); + expect(consoleSpy).toHaveBeenCalledTimes(1); + }); + + it('warns on nested optional fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + foo: z.object({ bar: z.array(z.object({ can_be_missing: z.boolean().optional() })) }), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'Zod field at `#/definitions/schema/properties/foo/properties/bar/items/properties/can_be_missing` uses `.optional()`', + ), + ); + expect(consoleSpy).toHaveBeenCalledTimes(1); + }); + + it('does not warn on union nullable fields', () => { + const consoleSpy = jest.spyOn(console, 'warn'); + consoleSpy.mockClear(); + + zodResponseFormat( + z.object({ + union: z.union([z.string(), z.null()]).optional(), + }), + 'schema', + ); + + expect(consoleSpy).toHaveBeenCalledTimes(0); + }); }); diff --git a/tests/index.test.ts b/tests/index.test.ts index b55ec5f67..6227d6fbe 100644 --- a/tests/index.test.ts +++ b/tests/index.test.ts @@ -2,7 +2,7 @@ import OpenAI from 'openai'; import { APIUserAbortError } from 'openai'; -import { Headers } from 'openai/core'; +import { debug, Headers } from 'openai/core'; import defaultFetch, { Response, type RequestInit, type RequestInfo } from 'node-fetch'; describe('instantiate client', () => { @@ -96,6 +96,15 @@ describe('instantiate client', () => { expect(response).toEqual({ url: 'http://localhost:5000/foo', custom: true }); }); + test('explicit global fetch', async () => { + // make sure the global fetch type is assignable to our Fetch type + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + apiKey: 'My API Key', + fetch: defaultFetch, + }); + }); + test('custom signal', async () => { const client = new OpenAI({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010/', @@ -122,6 +131,19 @@ describe('instantiate client', () => { expect(spy).toHaveBeenCalledTimes(1); }); + test('normalized method', async () => { + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + capturedRequest = init; + return new Response(JSON.stringify({}), { headers: { 'Content-Type': 'application/json' } }); + }; + + const client = new OpenAI({ baseURL: 'http://localhost:5000/', apiKey: 'My API Key', fetch: testFetch }); + + await client.patch('/foo'); + expect(capturedRequest?.method).toEqual('PATCH'); + }); + describe('baseUrl', () => { test('trailing slash', () => { const client = new OpenAI({ baseURL: 'http://localhost:5000/custom/path/', apiKey: 'My API Key' }); @@ -177,7 +199,7 @@ describe('instantiate client', () => { expect(client.apiKey).toBe('My API Key'); }); - test('with overriden environment variable arguments', () => { + test('with overridden environment variable arguments', () => { // set options via env var process.env['OPENAI_API_KEY'] = 'another My API Key'; const client = new OpenAI({ apiKey: 'My API Key' }); @@ -295,6 +317,39 @@ describe('retries', () => { expect(capturedRequest!.headers as Headers).not.toHaveProperty('x-stainless-retry-count'); }); + test('omit retry count header by default', async () => { + let count = 0; + let capturedRequest: RequestInit | undefined; + const testFetch = async (url: RequestInfo, init: RequestInit = {}): Promise => { + count++; + if (count <= 2) { + return new Response(undefined, { + status: 429, + headers: { + 'Retry-After': '0.1', + }, + }); + } + capturedRequest = init; + return new Response(JSON.stringify({ a: 1 }), { headers: { 'Content-Type': 'application/json' } }); + }; + const client = new OpenAI({ + apiKey: 'My API Key', + fetch: testFetch, + maxRetries: 4, + defaultHeaders: { 'X-Stainless-Retry-Count': null }, + }); + + expect( + await client.request({ + path: '/foo', + method: 'get', + }), + ).toEqual({ a: 1 }); + + expect(capturedRequest!.headers as Headers).not.toHaveProperty('x-stainless-retry-count'); + }); + test('overwrite retry count header', async () => { let count = 0; let capturedRequest: RequestInit | undefined; @@ -378,3 +433,95 @@ describe('retries', () => { expect(count).toEqual(3); }); }); + +describe('debug()', () => { + const env = process.env; + const spy = jest.spyOn(console, 'log'); + + beforeEach(() => { + jest.resetModules(); + process.env = { ...env }; + process.env['DEBUG'] = 'true'; + }); + + afterEach(() => { + process.env = env; + }); + + test('body request object with Authorization header', function () { + // Test request body includes headers object with Authorization + const headersTest = { + headers: { + Authorization: 'fakeAuthorization', + }, + }; + debug('request', headersTest); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + headers: { + Authorization: 'REDACTED', + }, + }); + }); + + test('body request object with api-key header', function () { + // Test request body includes headers object with api-ley + const apiKeyTest = { + headers: { + 'api-key': 'fakeKey', + }, + }; + debug('request', apiKeyTest); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + headers: { + 'api-key': 'REDACTED', + }, + }); + }); + + test('header object with Authorization header', function () { + // Test headers object with authorization header + const authorizationTest = { + authorization: 'fakeValue', + }; + debug('request', authorizationTest); + 
expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); + + test('input args are not mutated', function () { + const authorizationTest = { + authorization: 'fakeValue', + }; + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + defaultHeaders: authorizationTest, + apiKey: 'api-key', + }); + + const { req } = client.buildRequest({ path: '/foo', method: 'post' }); + debug('request', authorizationTest); + expect((req.headers as Headers)['authorization']).toEqual('fakeValue'); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); + + test('input headers are not mutated', function () { + const authorizationTest = { + authorization: 'fakeValue', + }; + const client = new OpenAI({ + baseURL: 'http://localhost:5000/', + defaultHeaders: authorizationTest, + apiKey: 'api-key', + }); + + const { req } = client.buildRequest({ path: '/foo', method: 'post' }); + debug('request', { headers: req.headers }); + expect((req.headers as Headers)['authorization']).toEqual('fakeValue'); + expect(spy).toHaveBeenCalledWith('OpenAI:DEBUG:request', { + authorization: 'REDACTED', + }); + }); +}); diff --git a/tests/internal/decoders/line.test.ts b/tests/internal/decoders/line.test.ts new file mode 100644 index 000000000..e76858e55 --- /dev/null +++ b/tests/internal/decoders/line.test.ts @@ -0,0 +1,128 @@ +import { findDoubleNewlineIndex, LineDecoder } from 'openai/internal/decoders/line'; + +function decodeChunks(chunks: string[], { flush }: { flush: boolean } = { flush: false }): string[] { + const decoder = new LineDecoder(); + const lines: string[] = []; + for (const chunk of chunks) { + lines.push(...decoder.decode(chunk)); + } + + if (flush) { + lines.push(...decoder.flush()); + } + + return lines; +} + +describe('line decoder', () => { + test('basic', () => { + // baz is not included because the line hasn't ended yet + expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']); + }); + + test('basic with \\r', () => { + expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']); + expect(decodeChunks(['foo', ' bar\r\nbaz'], { flush: true })).toEqual(['foo bar', 'baz']); + }); + + test('trailing new lines', () => { + expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('trailing new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']); + }); + + test('escaped new lines', () => { + expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']); + }); + + test('escaped new lines with \\r', () => { + expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']); + }); + + test('\\r & \\n split across multiple chunks', () => { + expect(decodeChunks(['foo\r', '\n', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('single \\r', () => { + expect(decodeChunks(['foo\r', 'bar'], { flush: true })).toEqual(['foo', 'bar']); + }); + + test('double \\r', () => { + expect(decodeChunks(['foo\r', 'bar\r'], { flush: true })).toEqual(['foo', 'bar']); + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']); + // implementation detail that we don't yield the single \r line until a new \r or \n is encountered + expect(decodeChunks(['foo\r', '\r', 'bar'], { flush: false })).toEqual(['foo']); + }); + + test('double \\r then \\r\\n', () => { + expect(decodeChunks(['foo\r', '\r', '\r', '\n', 'bar', 
+    expect(decodeChunks(['foo\n', '\n', '\n', 'bar', '\n'])).toEqual(['foo', '', '', 'bar']);
+  });
+
+  test('double newline', () => {
+    expect(decodeChunks(['foo\n\nbar'], { flush: true })).toEqual(['foo', '', 'bar']);
+    expect(decodeChunks(['foo', '\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']);
+    expect(decodeChunks(['foo\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']);
+    expect(decodeChunks(['foo', '\n', '\n', 'bar'], { flush: true })).toEqual(['foo', '', 'bar']);
+  });
+
+  test('multi-byte characters across chunks', () => {
+    const decoder = new LineDecoder();
+
+    // bytes taken from the string 'известни' and arbitrarily split
+    // so that some multi-byte characters span multiple chunks
+    expect(decoder.decode(new Uint8Array([0xd0]))).toHaveLength(0);
+    expect(decoder.decode(new Uint8Array([0xb8, 0xd0, 0xb7, 0xd0]))).toHaveLength(0);
+    expect(
+      decoder.decode(new Uint8Array([0xb2, 0xd0, 0xb5, 0xd1, 0x81, 0xd1, 0x82, 0xd0, 0xbd, 0xd0, 0xb8])),
+    ).toHaveLength(0);
+
+    const decoded = decoder.decode(new Uint8Array([0xa]));
+    expect(decoded).toEqual(['известни']);
+  });
+
+  test('flushing trailing newlines', () => {
+    expect(decodeChunks(['foo\n', '\nbar'], { flush: true })).toEqual(['foo', '', 'bar']);
+  });
+
+  test('flushing empty buffer', () => {
+    expect(decodeChunks([], { flush: true })).toEqual([]);
+  });
+});
+
+describe('findDoubleNewlineIndex', () => {
+  test('finds \\n\\n', () => {
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\n\nbar'))).toBe(5);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('\n\nbar'))).toBe(2);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\n\n'))).toBe(5);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('\n\n'))).toBe(2);
+  });
+
+  test('finds \\r\\r', () => {
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\rbar'))).toBe(5);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\rbar'))).toBe(2);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\r'))).toBe(5);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\r'))).toBe(2);
+  });
+
+  test('finds \\r\\n\\r\\n', () => {
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r\nbar'))).toBe(7);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\n\r\nbar'))).toBe(4);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r\n'))).toBe(7);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('\r\n\r\n'))).toBe(4);
+  });
+
+  test('returns -1 when no double newline found', () => {
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\nbar'))).toBe(-1);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\rbar'))).toBe(-1);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\nbar'))).toBe(-1);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode(''))).toBe(-1);
+  });
+
+  test('handles incomplete patterns', () => {
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n\r'))).toBe(-1);
+    expect(findDoubleNewlineIndex(new TextEncoder().encode('foo\r\n'))).toBe(-1);
+  });
+});
diff --git a/tests/lib/ChatCompletionRunFunctions.test.ts b/tests/lib/ChatCompletionRunFunctions.test.ts
index b684f204d..496501a86 100644
--- a/tests/lib/ChatCompletionRunFunctions.test.ts
+++ b/tests/lib/ChatCompletionRunFunctions.test.ts
@@ -628,7 +628,7 @@ describe('resource completions', () => {
         content: "it's raining",
         parsed: null,
         refusal: null,
-        tool_calls: [],
+        tool_calls: undefined,
       },
     ]);
     expect(listener.functionCallResults).toEqual([`it's raining`]);
@@ -876,7 +876,7 @@ describe('resource completions', () => {
         content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}',
         parsed: null,
         refusal: null,
-        tool_calls: [],
+        tool_calls: undefined,
       },
     ]);
     expect(listener.functionCallResults).toEqual(['3']);
@@ -1125,7 +1125,7 @@ describe('resource completions', () => {
         content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}',
         parsed: null,
         refusal: null,
-        tool_calls: [],
+        tool_calls: undefined,
       },
     ]);
     expect(listener.functionCallResults).toEqual([`must be an object`, '3']);
@@ -1443,7 +1443,7 @@ describe('resource completions', () => {
         content: "it's raining",
         parsed: null,
         refusal: null,
-        tool_calls: [],
+        tool_calls: undefined,
       },
     ]);
     expect(listener.functionCallResults).toEqual([
@@ -1572,7 +1572,7 @@ describe('resource completions', () => {
         content: "it's raining",
         parsed: null,
         refusal: null,
-        tool_calls: [],
+        tool_calls: undefined,
       },
     ]);
     expect(listener.eventFunctionCallResults).toEqual([`it's raining`]);
@@ -1795,7 +1795,7 @@ describe('resource completions', () => {
         content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}',
         parsed: null,
         refusal: null,
-        tool_calls: [],
+        tool_calls: undefined,
       },
     ]);
     expect(listener.eventFunctionCallResults).toEqual(['3']);
@@ -1997,7 +1997,7 @@ describe('resource completions', () => {
         content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}',
         parsed: null,
         refusal: null,
-        tool_calls: [],
+        tool_calls: undefined,
      },
     ]);
     expect(listener.eventFunctionCallResults).toEqual([`must be an object`, '3']);
@@ -2301,7 +2301,7 @@ describe('resource completions', () => {
         content: "it's raining",
         parsed: null,
         refusal: null,
-        tool_calls: [],
+        tool_calls: undefined,
       },
     ]);
     expect(listener.eventFunctionCallResults).toEqual([
@@ -2347,7 +2347,7 @@ describe('resource completions', () => {
       content: 'The weather is great today!',
       parsed: null,
       refusal: null,
-      tool_calls: [],
+      tool_calls: undefined,
     });
     await listener.sanityCheck();
   });
@@ -2386,7 +2386,7 @@ describe('resource completions', () => {
       content: 'The weather is great today!',
       parsed: null,
       refusal: null,
-      tool_calls: [],
+      tool_calls: undefined,
     });
     await listener.sanityCheck();
   });
diff --git a/tests/lib/ChatCompletionStream.test.ts b/tests/lib/ChatCompletionStream.test.ts
index e5ef20c9e..34c5fd204 100644
--- a/tests/lib/ChatCompletionStream.test.ts
+++ b/tests/lib/ChatCompletionStream.test.ts
@@ -39,7 +39,6 @@ describe('.stream()', () => {
       },
       "refusal": null,
       "role": "assistant",
-      "tool_calls": [],
     },
   }
   `);
@@ -198,7 +197,6 @@ describe('.stream()', () => {
       },
       "refusal": null,
       "role": "assistant",
-      "tool_calls": [],
     },
   }
   `);
@@ -386,7 +384,6 @@ describe('.stream()', () => {
       "parsed": null,
       "refusal": "I'm very sorry, but I can't assist with that request.",
       "role": "assistant",
-      "tool_calls": [],
     },
   }
   `);
diff --git a/tests/lib/azure.test.ts b/tests/lib/azure.test.ts
index 064a0098c..430efbe57 100644
--- a/tests/lib/azure.test.ts
+++ b/tests/lib/azure.test.ts
@@ -51,6 +51,18 @@ describe('instantiate azure client', () => {
       });
       expect(req.headers as Headers).not.toHaveProperty('x-my-default-header');
     });
+
+    test('includes retry count', () => {
+      const { req } = client.buildRequest(
+        {
+          path: '/foo',
+          method: 'post',
+          headers: { 'X-My-Default-Header': null },
+        },
+        { retryCount: 1 },
+      );
+      expect((req.headers as Headers)['x-stainless-retry-count']).toEqual('1');
+    });
   });
 
   describe('defaultQuery', () => {
@@ -483,21 +495,23 @@ describe('azure request building', () => {
     );
   });
 
-  test('Audio translations is not handled', async () => {
+  test('handles audio translations', async () => {
     const { url } = (await client.audio.translations.create({
       model: deployment,
       file: { url: 'https://example.com/', blob: () => 0 as any },
     })) as any;
-    expect(url).toStrictEqual(`https://example.com/openai/audio/translations?api-version=${apiVersion}`);
+    expect(url).toStrictEqual(
+      `https://example.com/openai/deployments/${deployment}/audio/translations?api-version=${apiVersion}`,
+    );
   });
 
-  test('Audio transcriptions is not handled', async () => {
+  test('handles audio transcriptions', async () => {
     const { url } = (await client.audio.transcriptions.create({
       model: deployment,
       file: { url: 'https://example.com/', blob: () => 0 as any },
     })) as any;
     expect(url).toStrictEqual(
-      `https://example.com/openai/audio/transcriptions?api-version=${apiVersion}`,
+      `https://example.com/openai/deployments/${deployment}/audio/transcriptions?api-version=${apiVersion}`,
     );
   });
diff --git a/tests/lib/parser.test.ts b/tests/lib/parser.test.ts
index b220e92d3..fa8123f5c 100644
--- a/tests/lib/parser.test.ts
+++ b/tests/lib/parser.test.ts
@@ -39,7 +39,6 @@ describe('.parse()', () => {
       },
       "refusal": null,
       "role": "assistant",
-      "tool_calls": [],
     },
   }
   `);
@@ -154,7 +153,6 @@ describe('.parse()', () => {
       },
       "refusal": null,
       "role": "assistant",
-      "tool_calls": [],
   }
   `);
@@ -488,7 +486,6 @@ describe('.parse()', () => {
       },
       "refusal": null,
       "role": "assistant",
-      "tool_calls": [],
   }
   `);
   });
@@ -787,7 +784,6 @@ describe('.parse()', () => {
       },
       "refusal": null,
       "role": "assistant",
-      "tool_calls": [],
   }
   `);
   });
@@ -947,7 +943,6 @@ describe('.parse()', () => {
       },
       "refusal": null,
       "role": "assistant",
-      "tool_calls": [],
   }
   `);
   });
@@ -1061,7 +1056,6 @@ describe('.parse()', () => {
       },
       "refusal": null,
       "role": "assistant",
-      "tool_calls": [],
   }
   `);
   });
diff --git a/tests/streaming.test.ts b/tests/streaming.test.ts
index 6fe9a5781..b9a38f208 100644
--- a/tests/streaming.test.ts
+++ b/tests/streaming.test.ts
@@ -1,35 +1,7 @@
 import { Response } from 'node-fetch';
 import { PassThrough } from 'stream';
 import assert from 'assert';
-import { _iterSSEMessages, _decodeChunks as decodeChunks } from 'openai/streaming';
-
-describe('line decoder', () => {
-  test('basic', () => {
-    // baz is not included because the line hasn't ended yet
-    expect(decodeChunks(['foo', ' bar\nbaz'])).toEqual(['foo bar']);
-  });
-
-  test('basic with \\r', () => {
-    // baz is not included because the line hasn't ended yet
-    expect(decodeChunks(['foo', ' bar\r\nbaz'])).toEqual(['foo bar']);
-  });
-
-  test('trailing new lines', () => {
-    expect(decodeChunks(['foo', ' bar', 'baz\n', 'thing\n'])).toEqual(['foo barbaz', 'thing']);
-  });
-
-  test('trailing new lines with \\r', () => {
-    expect(decodeChunks(['foo', ' bar', 'baz\r\n', 'thing\r\n'])).toEqual(['foo barbaz', 'thing']);
-  });
-
-  test('escaped new lines', () => {
-    expect(decodeChunks(['foo', ' bar\\nbaz\n'])).toEqual(['foo bar\\nbaz']);
-  });
-
-  test('escaped new lines with \\r', () => {
-    expect(decodeChunks(['foo', ' bar\\r\\nbaz\n'])).toEqual(['foo bar\\r\\nbaz']);
-  });
-});
+import { _iterSSEMessages } from 'openai/streaming';
 
 describe('streaming decoding', () => {
   test('basic', async () => {
diff --git a/tsconfig.deno.json b/tsconfig.deno.json
index d0e9473d9..849e070db 100644
--- a/tsconfig.deno.json
+++ b/tsconfig.deno.json
@@ -1,19 +1,14 @@
 {
   "extends": "./tsconfig.json",
-  "include": ["deno"],
+  "include": ["dist-deno"],
   "exclude": [],
"compilerOptions": { - "rootDir": "./deno", + "rootDir": "./dist-deno", "lib": ["es2020", "DOM"], - "paths": { - "openai/_shims/auto/*": ["deno/_shims/auto/*-deno"], - "openai/*": ["deno/*"], - "openai": ["deno/index.ts"], - }, "noEmit": true, "declaration": true, "declarationMap": true, - "outDir": "deno", + "outDir": "dist-deno", "pretty": true, "sourceMap": true } diff --git a/tsconfig.json b/tsconfig.json index 5f99085fc..33767f7b1 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -11,7 +11,7 @@ "paths": { "openai/_shims/auto/*": ["src/_shims/auto/*-node"], "openai/*": ["src/*"], - "openai": ["src/index.ts"], + "openai": ["src/index.ts"] }, "noEmit": true, @@ -32,6 +32,7 @@ "noUncheckedIndexedAccess": true, "noImplicitOverride": true, "noPropertyAccessFromIndexSignature": true, + "isolatedModules": true, "skipLibCheck": true } diff --git a/yarn.lock b/yarn.lock index 5a01e39e3..ad5fb7630 100644 --- a/yarn.lock +++ b/yarn.lock @@ -322,9 +322,9 @@ eslint-visitor-keys "^3.3.0" "@eslint-community/regexpp@^4.5.1": - version "4.9.0" - resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.9.0.tgz#7ccb5f58703fa61ffdcbf39e2c604a109e781162" - integrity sha512-zJmuCWj2VLBt4c25CfBIbMZLGLyhkvs7LznyVX5HfpzeocThgIj5XQK4L+g3U36mMcx8bPMhGyPpwCATamC4jQ== + version "4.11.1" + resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.11.1.tgz#a547badfc719eb3e5f4b556325e542fbe9d7a18f" + integrity sha512-m4DVN9ZqskZoLU5GlWZadwDnYo3vAEydiUayB9widCl9ffWx2IvPnp6n3on5rJmziJSw9Bv+Z3ChDVdMwXCY8Q== "@eslint-community/regexpp@^4.6.1": version "4.6.2" @@ -759,16 +759,6 @@ dependencies: "@swc/counter" "^0.1.3" -"@ts-morph/common@~0.20.0": - version "0.20.0" - resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.20.0.tgz#3f161996b085ba4519731e4d24c35f6cba5b80af" - integrity sha512-7uKjByfbPpwuzkstL3L5MQyuXPSKdoNG93Fmi2JoDcTf3pEP731JdRFAduRVkOs8oqxPsXKA+ScrWkdQ8t/I+Q== - dependencies: - fast-glob "^3.2.12" - minimatch "^7.4.3" - mkdirp "^2.1.6" - path-browserify "^1.0.1" - "@tsconfig/node10@^1.0.7": version "1.0.8" resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.8.tgz#c1e4e80d6f964fbecb3359c43bd48b40f7cadad9" @@ -857,9 +847,9 @@ pretty-format "^29.0.0" "@types/json-schema@^7.0.12": - version "7.0.13" - resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.13.tgz#02c24f4363176d2d18fc8b70b9f3c54aba178a85" - integrity sha512-RbSSoHliUbnXj3ny0CNFOoxrIDV6SUGyStHsvDqosw6CkdPV8TtWGlfecuK4ToyMEAql6pzNxgCFKanovUzlgQ== + version "7.0.15" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" + integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== "@types/node-fetch@^2.6.4": version "2.6.4" @@ -882,15 +872,22 @@ integrity sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA== "@types/semver@^7.5.0": - version "7.5.3" - resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.3.tgz#9a726e116beb26c24f1ccd6850201e1246122e04" - integrity sha512-OxepLK9EuNEIPxWNME+C6WwbRAOOI2o2BaQEGzz5Lu2e4Z5eDnEo+/aVEDMIXywoJitJ7xWd641wrGLZdtwRyw== + version "7.5.8" + resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.8.tgz#8268a8c57a3e4abd25c165ecd36237db7948a55e" + integrity sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ== 
"@types/stack-utils@^2.0.0": version "2.0.3" resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.3.tgz#6209321eb2c1712a7e7466422b8cb1fc0d9dd5d8" integrity sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw== +"@types/ws@^8.5.13": + version "8.5.13" + resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.13.tgz#6414c280875e2691d0d1e080b05addbf5cb91e20" + integrity sha512-osM/gWBTPKgHV8XkTunnegTRIsvF6owmf5w+JtAfOw472dptdm0dlGv4xCt6GwQRcC2XVOvvRE/0bAoQcL2QkA== + dependencies: + "@types/node" "*" + "@types/yargs-parser@*": version "21.0.3" resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.3.tgz#815e30b786d2e8f0dcd85fd5bcf5e1a04d008f15" @@ -904,15 +901,15 @@ "@types/yargs-parser" "*" "@typescript-eslint/eslint-plugin@^6.7.0": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.3.tgz#d98046e9f7102d49a93d944d413c6055c47fafd7" - integrity sha512-vntq452UHNltxsaaN+L9WyuMch8bMd9CqJ3zhzTPXXidwbf5mqqKCVXEuvRZUqLJSTLeWE65lQwyXsRGnXkCTA== + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz#30830c1ca81fd5f3c2714e524c4303e0194f9cd3" + integrity sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA== dependencies: "@eslint-community/regexpp" "^4.5.1" - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/type-utils" "6.7.3" - "@typescript-eslint/utils" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/type-utils" "6.21.0" + "@typescript-eslint/utils" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" graphemer "^1.4.0" ignore "^5.2.4" @@ -921,71 +918,72 @@ ts-api-utils "^1.0.1" "@typescript-eslint/parser@^6.7.0": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.7.3.tgz#aaf40092a32877439e5957e18f2d6a91c82cc2fd" - integrity sha512-TlutE+iep2o7R8Lf+yoer3zU6/0EAUc8QIBB3GYBc1KGz4c4TRm83xwXUZVPlZ6YCLss4r77jbu6j3sendJoiQ== - dependencies: - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/typescript-estree" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.21.0.tgz#af8fcf66feee2edc86bc5d1cf45e33b0630bf35b" + integrity sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ== + dependencies: + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" -"@typescript-eslint/scope-manager@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.7.3.tgz#07e5709c9bdae3eaf216947433ef97b3b8b7d755" - integrity sha512-wOlo0QnEou9cHO2TdkJmzF7DFGvAKEnB82PuPNHpT8ZKKaZu6Bm63ugOTn9fXNJtvuDPanBc78lGUGGytJoVzQ== +"@typescript-eslint/scope-manager@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz#ea8a9bfc8f1504a6ac5d59a6df308d3a0630a2b1" + integrity sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg== dependencies: - "@typescript-eslint/types" 
"6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" -"@typescript-eslint/type-utils@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.7.3.tgz#c2c165c135dda68a5e70074ade183f5ad68f3400" - integrity sha512-Fc68K0aTDrKIBvLnKTZ5Pf3MXK495YErrbHb1R6aTpfK5OdSFj0rVN7ib6Tx6ePrZ2gsjLqr0s98NG7l96KSQw== +"@typescript-eslint/type-utils@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz#6473281cfed4dacabe8004e8521cee0bd9d4c01e" + integrity sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag== dependencies: - "@typescript-eslint/typescript-estree" "6.7.3" - "@typescript-eslint/utils" "6.7.3" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/utils" "6.21.0" debug "^4.3.4" ts-api-utils "^1.0.1" -"@typescript-eslint/types@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.7.3.tgz#0402b5628a63f24f2dc9d4a678e9a92cc50ea3e9" - integrity sha512-4g+de6roB2NFcfkZb439tigpAMnvEIg3rIjWQ+EM7IBaYt/CdJt6em9BJ4h4UpdgaBWdmx2iWsafHTrqmgIPNw== +"@typescript-eslint/types@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.21.0.tgz#205724c5123a8fef7ecd195075fa6e85bac3436d" + integrity sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg== -"@typescript-eslint/typescript-estree@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.3.tgz#ec5bb7ab4d3566818abaf0e4a8fa1958561b7279" - integrity sha512-YLQ3tJoS4VxLFYHTw21oe1/vIZPRqAO91z6Uv0Ss2BKm/Ag7/RVQBcXTGcXhgJMdA4U+HrKuY5gWlJlvoaKZ5g== +"@typescript-eslint/typescript-estree@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz#c47ae7901db3b8bddc3ecd73daff2d0895688c46" + integrity sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ== dependencies: - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/visitor-keys" "6.7.3" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" debug "^4.3.4" globby "^11.1.0" is-glob "^4.0.3" + minimatch "9.0.3" semver "^7.5.4" ts-api-utils "^1.0.1" -"@typescript-eslint/utils@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.7.3.tgz#96c655816c373135b07282d67407cb577f62e143" - integrity sha512-vzLkVder21GpWRrmSR9JxGZ5+ibIUSudXlW52qeKpzUEQhRSmyZiVDDj3crAth7+5tmN1ulvgKaCU2f/bPRCzg== +"@typescript-eslint/utils@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.21.0.tgz#4714e7a6b39e773c1c8e97ec587f520840cd8134" + integrity sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ== dependencies: "@eslint-community/eslint-utils" "^4.4.0" "@types/json-schema" "^7.0.12" "@types/semver" "^7.5.0" - "@typescript-eslint/scope-manager" "6.7.3" - "@typescript-eslint/types" "6.7.3" - "@typescript-eslint/typescript-estree" "6.7.3" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" semver "^7.5.4" 
-"@typescript-eslint/visitor-keys@6.7.3": - version "6.7.3" - resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.3.tgz#83809631ca12909bd2083558d2f93f5747deebb2" - integrity sha512-HEVXkU9IB+nk9o63CeICMHxFWbHWr3E1mpilIQBe9+7L/lH97rleFLVtYsfnWB+JVMaiFnEaxvknvmIzX+CqVg== +"@typescript-eslint/visitor-keys@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz#87a99d077aa507e20e238b11d56cc26ade45fe47" + integrity sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A== dependencies: - "@typescript-eslint/types" "6.7.3" + "@typescript-eslint/types" "6.21.0" eslint-visitor-keys "^3.4.1" abort-controller@^3.0.0: @@ -1314,11 +1312,6 @@ co@^4.6.0: resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" integrity sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ== -code-block-writer@^12.0.0: - version "12.0.0" - resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-12.0.0.tgz#4dd58946eb4234105aff7f0035977b2afdc2a770" - integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w== - collect-v8-coverage@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz#c0b29bcd33bcd0779a1344c2136051e6afd3d9e9" @@ -1384,21 +1377,28 @@ create-require@^1.1.0: integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" which "^2.0.1" -debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4: +debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== dependencies: ms "2.1.2" +debug@^4.3.4: + version "4.3.7" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" + integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== + dependencies: + ms "^2.1.3" + dedent@^1.0.0: version "1.5.1" resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff" @@ -1546,12 +1546,7 @@ eslint-scope@^7.2.2: esrecurse "^4.3.0" estraverse "^5.2.0" -eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1: - version "3.4.2" - resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.2.tgz#8c2095440eca8c933bedcadf16fefa44dbe9ba5f" - integrity sha512-8drBzUEyZ2llkpCA67iYrgEssKDUu68V8ChqqOfFupIaG/LCVPUT+CoGJpT77zJprs4T/W7p07LP7zAIMuweVw== - -eslint-visitor-keys@^3.4.3: 
+eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3:
   version "3.4.3"
   resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800"
   integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==
@@ -1716,18 +1711,7 @@ fast-glob@^3.2.12:
     merge2 "^1.3.0"
     micromatch "^4.0.4"
 
-fast-glob@^3.2.9:
-  version "3.3.1"
-  resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.1.tgz#784b4e897340f3dbbef17413b3f11acf03c874c4"
-  integrity sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==
-  dependencies:
-    "@nodelib/fs.stat" "^2.0.2"
-    "@nodelib/fs.walk" "^1.2.3"
-    glob-parent "^5.1.2"
-    merge2 "^1.3.0"
-    micromatch "^4.0.4"
-
-fast-glob@^3.3.0:
+fast-glob@^3.2.9, fast-glob@^3.3.0:
   version "3.3.2"
   resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129"
   integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==
@@ -1749,9 +1733,9 @@ fast-levenshtein@^2.0.6:
   integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==
 
 fastq@^1.6.0:
-  version "1.15.0"
-  resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.15.0.tgz#d04d07c6a2a68fe4599fea8d2e103a937fae6b3a"
-  integrity sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==
+  version "1.17.1"
+  resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47"
+  integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==
   dependencies:
     reusify "^1.0.4"
 
@@ -1974,9 +1958,9 @@ iconv-lite@^0.6.3:
   safer-buffer ">= 2.1.2 < 3.0.0"
 
 ignore@^5.2.0, ignore@^5.2.4:
-  version "5.2.4"
-  resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.4.tgz#a291c0c6178ff1b960befe47fcdec301674a6324"
-  integrity sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==
+  version "5.3.2"
+  resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5"
+  integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==
 
 import-fresh@^3.2.1:
   version "3.3.0"
@@ -2681,6 +2665,13 @@ mimic-fn@^4.0.0:
   resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-4.0.0.tgz#60a90550d5cb0b239cca65d893b1a53b29871ecc"
   integrity sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==
 
+minimatch@9.0.3:
+  version "9.0.3"
+  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825"
+  integrity sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==
+  dependencies:
+    brace-expansion "^2.0.1"
+
 minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2:
   version "3.1.2"
   resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
@@ -2688,29 +2679,17 @@ minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2:
   dependencies:
     brace-expansion "^1.1.7"
 
-minimatch@^7.4.3:
-  version "7.4.6"
-  resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-7.4.6.tgz#845d6f254d8f4a5e4fd6baf44d5f10c8448365fb"
-  integrity sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw==
-  dependencies:
-    brace-expansion "^2.0.1"
-
 minimist@^1.2.6:
   version "1.2.6"
   resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44"
   integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==
 
-mkdirp@^2.1.6:
-  version "2.1.6"
-  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-2.1.6.tgz#964fbcb12b2d8c5d6fbc62a963ac95a273e2cc19"
-  integrity sha512-+hEnITedc8LAtIP9u3HJDFIdcLV2vXP33sqLLIzkv1Db1zO/1OxbvYf0Y1OC/S/Qo5dxHXepofhmxL02PsKe+A==
-
 ms@2.1.2:
   version "2.1.2"
   resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009"
   integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
 
-ms@^2.0.0:
+ms@^2.0.0, ms@^2.1.3:
   version "2.1.3"
   resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
   integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
@@ -2868,11 +2847,6 @@ parse-json@^5.2.0:
     json-parse-even-better-errors "^2.3.0"
     lines-and-columns "^1.1.6"
 
-path-browserify@^1.0.1:
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd"
-  integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==
-
 path-exists@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
@@ -3075,13 +3049,18 @@ semver@^6.3.0, semver@^6.3.1:
   resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4"
   integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
 
-semver@^7.5.3, semver@^7.5.4:
+semver@^7.5.3:
   version "7.5.4"
   resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e"
   integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==
   dependencies:
     lru-cache "^6.0.0"
 
+semver@^7.5.4:
+  version "7.6.3"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143"
+  integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==
+
 shebang-command@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea"
@@ -3278,9 +3257,9 @@ tr46@~0.0.3:
   integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o=
 
 ts-api-utils@^1.0.1:
-  version "1.0.3"
-  resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.0.3.tgz#f12c1c781d04427313dbac808f453f050e54a331"
-  integrity sha512-wNMeqtMz5NtwpT/UZGY5alT+VoKdSsOOP/kqHFcUW1P/VRhH2wJ48+DN2WwUliNbQ976ETwDL0Ifd2VVvgonvg==
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.3.0.tgz#4b490e27129f1e8e686b45cc4ab63714dc60eea1"
+  integrity sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==
 
 ts-jest@^29.1.0:
   version "29.1.1"
@@ -3296,14 +3275,6 @@
     semver "^7.5.3"
     yargs-parser "^21.0.1"
 
-ts-morph@^19.0.0:
-  version "19.0.0"
-  resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-19.0.0.tgz#43e95fb0156c3fe3c77c814ac26b7d0be2f93169"
"https://registry.yarnpkg.com/ts-morph/-/ts-morph-19.0.0.tgz#43e95fb0156c3fe3c77c814ac26b7d0be2f93169" - integrity sha512-D6qcpiJdn46tUqV45vr5UGM2dnIEuTGNxVhg0sk5NX11orcouwj6i1bMqZIz2mZTZB1Hcgy7C3oEVhAT+f6mbQ== - dependencies: - "@ts-morph/common" "~0.20.0" - code-block-writer "^12.0.0" - ts-node@^10.5.0: version "10.7.0" resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-10.7.0.tgz#35d503d0fab3e2baa672a0e94f4b40653c2463f5" @@ -3483,6 +3454,11 @@ write-file-atomic@^4.0.2: imurmurhash "^0.1.4" signal-exit "^3.0.7" +ws@^8.18.0: + version "8.18.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.18.0.tgz#0d7505a6eafe2b0e712d232b42279f53bc289bbc" + integrity sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw== + y18n@^5.0.5: version "5.0.8" resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55"