
fix(api): correct some Responses types #2217

Closed · wants to merge 6 commits
2 changes: 0 additions & 2 deletions .github/workflows/ci.yml
@@ -12,7 +12,6 @@ jobs:
   lint:
     name: lint
     runs-on: ubuntu-latest
-    if: github.repository == 'openai/openai-python'
 
     steps:
       - uses: actions/checkout@v4
@@ -34,7 +33,6 @@ jobs:
   test:
     name: test
     runs-on: ubuntu-latest
-    if: github.repository == 'openai/openai-python'
 
     steps:
       - uses: actions/checkout@v4
39 changes: 0 additions & 39 deletions .github/workflows/create-releases.yml

This file was deleted.

8 changes: 6 additions & 2 deletions .github/workflows/publish-pypi.yml
@@ -1,9 +1,13 @@
-# workflow for re-running publishing to PyPI in case it fails for some reason
-# you can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml
+# This workflow is triggered when a GitHub release is created.
+# It can also be run manually to re-publish to PyPI in case it failed for some reason.
+# You can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml
 name: Publish PyPI
 on:
   workflow_dispatch:
 
+  release:
+    types: [published]
+
 jobs:
   publish:
     name: publish
1 change: 0 additions & 1 deletion .github/workflows/release-doctor.yml
@@ -19,5 +19,4 @@ jobs:
       run: |
         bash ./bin/check-release-environment
       env:
-        STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }}
         PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.30.1"
+  ".": "1.66.4"
 }
2 changes: 1 addition & 1 deletion .stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 81
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml
4 changes: 0 additions & 4 deletions bin/check-release-environment
@@ -2,10 +2,6 @@
 
 errors=()
 
-if [ -z "${STAINLESS_API_KEY}" ]; then
-  errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.")
-fi
-
 if [ -z "${PYPI_TOKEN}" ]; then
   errors+=("The OPENAI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.")
 fi
3 changes: 0 additions & 3 deletions bin/publish-pypi
@@ -3,7 +3,4 @@
 set -eux
 mkdir -p dist
 rye build --clean
-# Patching importlib-metadata version until upstream library version is updated
-# https://github.com/pypa/twine/issues/977#issuecomment-2189800841
-"$HOME/.rye/self/bin/python3" -m pip install 'importlib-metadata==7.2.1'
 rye publish --yes --token=$PYPI_TOKEN
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.30.1"
+version = "1.66.4"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
@@ -88,7 +88,7 @@ typecheck = { chain = [
 "typecheck:mypy" = "mypy ."
 
 [build-system]
-requires = ["hatchling", "hatch-fancy-pypi-readme"]
+requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"]
 build-backend = "hatchling.build"
 
 [tool.hatch.build]
7 changes: 5 additions & 2 deletions src/openai/_models.py
@@ -65,7 +65,7 @@
 from ._constants import RAW_RESPONSE_HEADER
 
 if TYPE_CHECKING:
-    from pydantic_core.core_schema import ModelField, LiteralSchema, ModelFieldsSchema
+    from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema
 
 __all__ = ["BaseModel", "GenericModel"]
 
@@ -646,15 +646,18 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
 
 def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None:
     schema = model.__pydantic_core_schema__
+    if schema["type"] == "definitions":
+        schema = schema["schema"]
+
     if schema["type"] != "model":
         return None
 
+    schema = cast("ModelSchema", schema)
     fields_schema = schema["schema"]
     if fields_schema["type"] != "model-fields":
         return None
 
     fields_schema = cast("ModelFieldsSchema", fields_schema)
 
     field = fields_schema["fields"].get(field_name)
     if not field:
         return None
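For context, a minimal sketch (pydantic v2 assumed, exact wrapping varies by version) of the `definitions` wrapping that the new branch unwraps; recursive models are one case where the core schema is not exposed as `"type": "model"` directly:

```python
# Hedged sketch: a self-referencing model can get its core schema wrapped in a
# {"type": "definitions"} node, so the helper must look one level deeper.
from __future__ import annotations

from typing import Optional

import pydantic


class Node(pydantic.BaseModel):
    value: int
    child: Optional[Node] = None  # self-reference can trigger a definitions wrapper


schema = Node.__pydantic_core_schema__
print(schema["type"])  # may be "definitions" rather than "model"
if schema["type"] == "definitions":
    schema = schema["schema"]  # unwrap, mirroring _extract_field_schema_pv2
print(schema["type"])
```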
2 changes: 1 addition & 1 deletion src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.30.1" # x-release-please-version
+__version__ = "1.66.4" # x-release-please-version
16 changes: 8 additions & 8 deletions src/openai/resources/batches.py
@@ -49,7 +49,7 @@ def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -67,9 +67,9 @@ def create(
             is supported.
 
           endpoint: The endpoint to be used for all requests in the batch. Currently
-          `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-          Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-          embedding inputs across all requests in the batch.
+          `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+          are supported. Note that `/v1/embeddings` batches are also restricted to a
+          maximum of 50,000 embedding inputs across all requests in the batch.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
 
@@ -259,7 +259,7 @@ async def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -277,9 +277,9 @@ async def create(
             is supported.
 
           endpoint: The endpoint to be used for all requests in the batch. Currently
-          `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-          Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-          embedding inputs across all requests in the batch.
+          `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+          are supported. Note that `/v1/embeddings` batches are also restricted to a
+          maximum of 50,000 embedding inputs across all requests in the batch.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
9 changes: 5 additions & 4 deletions src/openai/types/batch_create_params.py
@@ -17,12 +17,13 @@ class BatchCreateParams(TypedDict, total=False):
     Currently only `24h` is supported.
     """
 
-    endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
+    endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
     """The endpoint to be used for all requests in the batch.
 
-    Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are
-    supported. Note that `/v1/embeddings` batches are also restricted to a maximum
-    of 50,000 embedding inputs across all requests in the batch.
+    Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and
+    `/v1/completions` are supported. Note that `/v1/embeddings` batches are also
+    restricted to a maximum of 50,000 embedding inputs across all requests in the
+    batch.
     """
 
     input_file_id: Required[str]
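A minimal usage sketch of the newly supported endpoint value; the file ID is hypothetical and assumed to point at an already uploaded JSONL batch file whose requests target `/v1/responses`:

```python
# Hedged sketch: submitting a batch against the newly supported /v1/responses
# endpoint. "file-abc123" is a hypothetical uploaded-file ID.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

batch = client.batches.create(
    completion_window="24h",   # currently the only supported window
    endpoint="/v1/responses",  # newly added alongside the existing endpoints
    input_file_id="file-abc123",
)
print(batch.id, batch.status)
```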
7 changes: 5 additions & 2 deletions src/openai/types/chat/chat_completion_chunk.py
@@ -142,6 +142,9 @@ class ChatCompletionChunk(BaseModel):
     """
     An optional field that will only be present when you set
     `stream_options: {"include_usage": true}` in your request. When present, it
-    contains a null value except for the last chunk which contains the token usage
-    statistics for the entire request.
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
     """
@@ -22,7 +22,7 @@ class FileFile(TypedDict, total=False):
     file_id: str
     """The ID of an uploaded file to use as input."""
 
-    file_name: str
+    filename: str
     """The name of the file, used when passing the file to the model as a string."""
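A hedged sketch of the corrected key; the outer `{"type": "file", ...}` content-part shape and the file ID are assumptions, not part of this diff:

```python
# The key is now `filename` (previously `file_name`). `file_id` comes from the
# TypedDict above; the surrounding content-part shape is assumed.
file_part = {
    "type": "file",
    "file": {
        "file_id": "file-abc123",  # hypothetical uploaded-file ID
        "filename": "report.pdf",  # corrected key name
    },
}
```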
@@ -12,6 +12,9 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False):
     """If set, an additional chunk will be streamed before the `data: [DONE]` message.
 
     The `usage` field on this chunk shows the token usage statistics for the entire
-    request, and the `choices` field will always be an empty array. All other chunks
-    will also include a `usage` field, but with a null value.
+    request, and the `choices` field will always be an empty array.
+
+    All other chunks will also include a `usage` field, but with a null value.
+    **NOTE:** If the stream is interrupted, you may not receive the final usage
+    chunk which contains the total token usage for the request.
     """
6 changes: 3 additions & 3 deletions src/openai/types/responses/response_function_tool_call.py
@@ -9,9 +9,6 @@
 
 
 class ResponseFunctionToolCall(BaseModel):
-    id: str
-    """The unique ID of the function tool call."""
-
     arguments: str
     """A JSON string of the arguments to pass to the function."""
 
@@ -24,6 +21,9 @@ class ResponseFunctionToolCall(BaseModel):
     type: Literal["function_call"]
     """The type of the function tool call. Always `function_call`."""
 
+    id: Optional[str] = None
+    """The unique ID of the function tool call."""
+
     status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
     """The status of the item.
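A sketch of what the relaxed `id` permits, e.g. constructing the item client side when replaying a tool call into a follow-up request; the `call_id`, `name`, and argument values are hypothetical:

```python
# Hedged sketch: with `id` now Optional, a locally built tool-call item no
# longer needs a server-assigned ID.
from openai.types.responses import ResponseFunctionToolCall

call = ResponseFunctionToolCall(
    arguments='{"city": "Paris"}',  # hypothetical arguments payload
    call_id="call_abc123",          # hypothetical call ID
    name="get_weather",             # hypothetical function name
    type="function_call",
)
print(call.id)  # None: no server-assigned ID on a client-constructed item
```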
@@ -8,9 +8,6 @@
 
 
 class ResponseFunctionToolCallParam(TypedDict, total=False):
-    id: Required[str]
-    """The unique ID of the function tool call."""
-
     arguments: Required[str]
     """A JSON string of the arguments to pass to the function."""
 
@@ -23,6 +20,9 @@ class ResponseFunctionToolCallParam(TypedDict, total=False):
     type: Required[Literal["function_call"]]
     """The type of the function tool call. Always `function_call`."""
 
+    id: str
+    """The unique ID of the function tool call."""
+
     status: Literal["in_progress", "completed", "incomplete"]
     """The status of the item.
13 changes: 12 additions & 1 deletion src/openai/types/responses/response_usage.py
@@ -3,7 +3,15 @@
 
 from ..._models import BaseModel
 
-__all__ = ["ResponseUsage", "OutputTokensDetails"]
+__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"]
+
+
+class InputTokensDetails(BaseModel):
+    cached_tokens: int
+    """The number of tokens that were retrieved from the cache.
+
+    [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+    """
 
 
 class OutputTokensDetails(BaseModel):
@@ -15,6 +23,9 @@ class ResponseUsage(BaseModel):
     input_tokens: int
     """The number of input tokens."""
 
+    input_tokens_details: InputTokensDetails
+    """A detailed breakdown of the input tokens."""
+
     output_tokens: int
     """The number of output tokens."""
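A sketch of reading the new breakdown off a Responses result; the model name is an assumption, and `usage` is guarded in case it is absent:

```python
# Hedged sketch: accessing the new input-token breakdown on a Responses call.
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",  # assumed model name
    input="What is prompt caching?",
)

usage = response.usage
if usage is not None:
    print("input tokens: ", usage.input_tokens)
    print("cached tokens:", usage.input_tokens_details.cached_tokens)
```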
2 changes: 1 addition & 1 deletion src/openai/types/shared/reasoning.py
@@ -20,7 +20,7 @@ class Reasoning(BaseModel):
     """
 
     generate_summary: Optional[Literal["concise", "detailed"]] = None
-    """**o-series models only**
+    """**computer_use_preview only**
 
     A summary of the reasoning performed by the model. This can be useful for
     debugging and understanding the model's reasoning process. One of `concise` or
6 changes: 3 additions & 3 deletions src/openai/types/shared_params/reasoning.py
@@ -3,15 +3,15 @@
 
 from __future__ import annotations
 
 from typing import Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, TypedDict
 
 from ..shared.reasoning_effort import ReasoningEffort
 
 __all__ = ["Reasoning"]
 
 
 class Reasoning(TypedDict, total=False):
-    effort: Required[Optional[ReasoningEffort]]
+    effort: Optional[ReasoningEffort]
     """**o-series models only**
 
     Constrains effort on reasoning for
@@ -21,7 +21,7 @@ class Reasoning(TypedDict, total=False):
     """
 
     generate_summary: Optional[Literal["concise", "detailed"]]
-    """**o-series models only**
+    """**computer_use_preview only**
 
     A summary of the reasoning performed by the model. This can be useful for
     debugging and understanding the model's reasoning process. One of `concise` or
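A sketch of what the loosened typing allows, assuming `Reasoning` is re-exported from `openai.types.shared_params`; before this change, a dict without `effort` failed type checking:

```python
# Hedged sketch: `effort` is no longer Required on the Reasoning TypedDict.
from openai.types.shared_params import Reasoning

r1: Reasoning = {"effort": "low"}                # still valid
r2: Reasoning = {"generate_summary": "concise"}  # now type-checks without `effort`
```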