diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e1ae39d559..d86fc0ea53 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,6 @@ jobs: lint: name: lint runs-on: ubuntu-latest - if: github.repository == 'openai/openai-python' steps: - uses: actions/checkout@v4 @@ -34,7 +33,6 @@ jobs: test: name: test runs-on: ubuntu-latest - if: github.repository == 'openai/openai-python' steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml deleted file mode 100644 index b3e1c679d4..0000000000 --- a/.github/workflows/create-releases.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Create releases -on: - schedule: - - cron: '0 5 * * *' # every day at 5am UTC - push: - branches: - - main - -jobs: - release: - name: release - if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-python' - runs-on: ubuntu-latest - environment: publish - - steps: - - uses: actions/checkout@v4 - - - uses: stainless-api/trigger-release-please@v1 - id: release - with: - repo: ${{ github.event.repository.full_name }} - stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - - name: Install Rye - if: ${{ steps.release.outputs.releases_created }} - run: | - curl -sSf https://rye.astral.sh/get | bash - echo "$HOME/.rye/shims" >> $GITHUB_PATH - env: - RYE_VERSION: '0.44.0' - RYE_INSTALL_OPTION: '--yes' - - - name: Publish to PyPI - if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-pypi - env: - PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 7096ca9832..403b895b7e 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to PyPI in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. 
+# You can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index e078964a6f..445f626d93 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -19,5 +19,4 @@ jobs: run: | bash ./bin/check-release-environment env: - STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1f79fd2d11..dac37ce406 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.30.1" + ".": "1.66.4" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index edc2aaf89f..1e04d7c268 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 81 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f763c1a35c8b9b02f1e31b9b2e09e21f98bfe8413e5079c86cbb07da2dd7779b.yml diff --git a/bin/check-release-environment b/bin/check-release-environment index 2cc5ad6352..5471b69edb 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,10 +2,6 @@ errors=() -if [ -z "${STAINLESS_API_KEY}" ]; then - errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") -fi - if [ -z "${PYPI_TOKEN}" ]; then errors+=("The OPENAI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") fi diff --git a/bin/publish-pypi b/bin/publish-pypi index 05bfccbb71..826054e924 100644 --- a/bin/publish-pypi +++ b/bin/publish-pypi @@ -3,7 +3,4 @@ set -eux mkdir -p dist rye build --clean -# Patching importlib-metadata version until upstream library version is updated -# https://github.com/pypa/twine/issues/977#issuecomment-2189800841 -"$HOME/.rye/self/bin/python3" -m pip install 'importlib-metadata==7.2.1' rye publish --yes --token=$PYPI_TOKEN diff --git a/pyproject.toml b/pyproject.toml index 13d703bc96..585db285c3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.30.1" +version = "1.66.4" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" @@ -88,7 +88,7 @@ typecheck = { chain = [ "typecheck:mypy" = "mypy ." 
[build-system] -requires = ["hatchling", "hatch-fancy-pypi-readme"] +requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"] build-backend = "hatchling.build" [tool.hatch.build] diff --git a/src/openai/_models.py b/src/openai/_models.py index c4401ff868..b51a1bf5f9 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -65,7 +65,7 @@ from ._constants import RAW_RESPONSE_HEADER if TYPE_CHECKING: - from pydantic_core.core_schema import ModelField, LiteralSchema, ModelFieldsSchema + from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema __all__ = ["BaseModel", "GenericModel"] @@ -646,15 +646,18 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None: schema = model.__pydantic_core_schema__ + if schema["type"] == "definitions": + schema = schema["schema"] + if schema["type"] != "model": return None + schema = cast("ModelSchema", schema) fields_schema = schema["schema"] if fields_schema["type"] != "model-fields": return None fields_schema = cast("ModelFieldsSchema", fields_schema) - field = fields_schema["fields"].get(field_name) if not field: return None diff --git a/src/openai/_version.py b/src/openai/_version.py index 83411041ae..df2f60a7dc 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.30.1" # x-release-please-version +__version__ = "1.66.4" # x-release-please-version diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index 7e7ec19ec2..b7a299be12 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -49,7 +49,7 @@ def create( self, *, completion_window: Literal["24h"], - endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"], + endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -67,9 +67,9 @@ def create( is supported. endpoint: The endpoint to be used for all requests in the batch. Currently - `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - embedding inputs across all requests in the batch. + `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + are supported. Note that `/v1/embeddings` batches are also restricted to a + maximum of 50,000 embedding inputs across all requests in the batch. input_file_id: The ID of an uploaded file that contains requests for the new batch. @@ -259,7 +259,7 @@ async def create( self, *, completion_window: Literal["24h"], - endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"], + endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -277,9 +277,9 @@ async def create( is supported. endpoint: The endpoint to be used for all requests in the batch. 
Currently - `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. - Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 - embedding inputs across all requests in the batch. + `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` + are supported. Note that `/v1/embeddings` batches are also restricted to a + maximum of 50,000 embedding inputs across all requests in the batch. input_file_id: The ID of an uploaded file that contains requests for the new batch. diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index e5be1d2bac..cc95afd3ba 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -17,12 +17,13 @@ class BatchCreateParams(TypedDict, total=False): Currently only `24h` is supported. """ - endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"]] + endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]] """The endpoint to be used for all requests in the batch. - Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are - supported. Note that `/v1/embeddings` batches are also restricted to a maximum - of 50,000 embedding inputs across all requests in the batch. + Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and + `/v1/completions` are supported. Note that `/v1/embeddings` batches are also + restricted to a maximum of 50,000 embedding inputs across all requests in the + batch. """ input_file_id: Required[str] diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index dede513f1e..31b9cb5456 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -142,6 +142,9 @@ class ChatCompletionChunk(BaseModel): """ An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. When present, it - contains a null value except for the last chunk which contains the token usage - statistics for the entire request. + contains a null value **except for the last chunk** which contains the token + usage statistics for the entire request. + + **NOTE:** If the stream is interrupted or cancelled, you may not receive the + final usage chunk which contains the total token usage for the request. """ diff --git a/src/openai/types/chat/chat_completion_content_part_param.py b/src/openai/types/chat/chat_completion_content_part_param.py index 1293c54312..cbedc853ba 100644 --- a/src/openai/types/chat/chat_completion_content_part_param.py +++ b/src/openai/types/chat/chat_completion_content_part_param.py @@ -22,7 +22,7 @@ class FileFile(TypedDict, total=False): file_id: str """The ID of an uploaded file to use as input.""" - file_name: str + filename: str """The name of the file, used when passing the file to the model as a string.""" diff --git a/src/openai/types/chat/chat_completion_stream_options_param.py b/src/openai/types/chat/chat_completion_stream_options_param.py index fbf7291821..471e0eba98 100644 --- a/src/openai/types/chat/chat_completion_stream_options_param.py +++ b/src/openai/types/chat/chat_completion_stream_options_param.py @@ -12,6 +12,9 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False): """If set, an additional chunk will be streamed before the `data: [DONE]` message. 
The `usage` field on this chunk shows the token usage statistics for the entire - request, and the `choices` field will always be an empty array. All other chunks - will also include a `usage` field, but with a null value. + request, and the `choices` field will always be an empty array. + + All other chunks will also include a `usage` field, but with a null value. + **NOTE:** If the stream is interrupted, you may not receive the final usage + chunk which contains the total token usage for the request. """ diff --git a/src/openai/types/responses/response_function_tool_call.py b/src/openai/types/responses/response_function_tool_call.py index 5d82906cb7..2a8482204e 100644 --- a/src/openai/types/responses/response_function_tool_call.py +++ b/src/openai/types/responses/response_function_tool_call.py @@ -9,9 +9,6 @@ class ResponseFunctionToolCall(BaseModel): - id: str - """The unique ID of the function tool call.""" - arguments: str """A JSON string of the arguments to pass to the function.""" @@ -24,6 +21,9 @@ class ResponseFunctionToolCall(BaseModel): type: Literal["function_call"] """The type of the function tool call. Always `function_call`.""" + id: Optional[str] = None + """The unique ID of the function tool call.""" + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None """The status of the item. diff --git a/src/openai/types/responses/response_function_tool_call_param.py b/src/openai/types/responses/response_function_tool_call_param.py index 51b947a764..eaa263cf67 100644 --- a/src/openai/types/responses/response_function_tool_call_param.py +++ b/src/openai/types/responses/response_function_tool_call_param.py @@ -8,9 +8,6 @@ class ResponseFunctionToolCallParam(TypedDict, total=False): - id: Required[str] - """The unique ID of the function tool call.""" - arguments: Required[str] """A JSON string of the arguments to pass to the function.""" @@ -23,6 +20,9 @@ class ResponseFunctionToolCallParam(TypedDict, total=False): type: Required[Literal["function_call"]] """The type of the function tool call. Always `function_call`.""" + id: str + """The unique ID of the function tool call.""" + status: Literal["in_progress", "completed", "incomplete"] """The status of the item. diff --git a/src/openai/types/responses/response_usage.py b/src/openai/types/responses/response_usage.py index ef631c5882..9ad36bd326 100644 --- a/src/openai/types/responses/response_usage.py +++ b/src/openai/types/responses/response_usage.py @@ -3,7 +3,15 @@ from ..._models import BaseModel -__all__ = ["ResponseUsage", "OutputTokensDetails"] +__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"] + + +class InputTokensDetails(BaseModel): + cached_tokens: int + """The number of tokens that were retrieved from the cache. + + [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). 
+ """ class OutputTokensDetails(BaseModel): @@ -15,6 +23,9 @@ class ResponseUsage(BaseModel): input_tokens: int """The number of input tokens.""" + input_tokens_details: InputTokensDetails + """A detailed breakdown of the input tokens.""" + output_tokens: int """The number of output tokens.""" diff --git a/src/openai/types/shared/reasoning.py b/src/openai/types/shared/reasoning.py index 50821a1727..78a396d738 100644 --- a/src/openai/types/shared/reasoning.py +++ b/src/openai/types/shared/reasoning.py @@ -20,7 +20,7 @@ class Reasoning(BaseModel): """ generate_summary: Optional[Literal["concise", "detailed"]] = None - """**o-series models only** + """**computer_use_preview only** A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of `concise` or diff --git a/src/openai/types/shared_params/reasoning.py b/src/openai/types/shared_params/reasoning.py index f2b5c5963a..2953b895c4 100644 --- a/src/openai/types/shared_params/reasoning.py +++ b/src/openai/types/shared_params/reasoning.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, TypedDict from ..shared.reasoning_effort import ReasoningEffort @@ -11,7 +11,7 @@ class Reasoning(TypedDict, total=False): - effort: Required[Optional[ReasoningEffort]] + effort: Optional[ReasoningEffort] """**o-series models only** Constrains effort on reasoning for @@ -21,7 +21,7 @@ class Reasoning(TypedDict, total=False): """ generate_summary: Optional[Literal["concise", "detailed"]] - """**o-series models only** + """**computer_use_preview only** A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. 
One of `concise` or diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py index 047b8bae12..02eade0963 100644 --- a/tests/api_resources/test_batches.py +++ b/tests/api_resources/test_batches.py @@ -22,7 +22,7 @@ class TestBatches: def test_method_create(self, client: OpenAI) -> None: batch = client.batches.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) assert_matches_type(Batch, batch, path=["response"]) @@ -31,7 +31,7 @@ def test_method_create(self, client: OpenAI) -> None: def test_method_create_with_all_params(self, client: OpenAI) -> None: batch = client.batches.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", metadata={"foo": "string"}, ) @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: def test_raw_response_create(self, client: OpenAI) -> None: response = client.batches.with_raw_response.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) @@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: def test_streaming_response_create(self, client: OpenAI) -> None: with client.batches.with_streaming_response.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) as response: assert not response.is_closed @@ -182,7 +182,7 @@ class TestAsyncBatches: async def test_method_create(self, async_client: AsyncOpenAI) -> None: batch = await async_client.batches.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) assert_matches_type(Batch, batch, path=["response"]) @@ -191,7 +191,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: batch = await async_client.batches.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", metadata={"foo": "string"}, ) @@ -201,7 +201,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.batches.with_raw_response.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) @@ -214,7 +214,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.batches.with_streaming_response.create( completion_window="24h", - endpoint="/v1/chat/completions", + endpoint="/v1/responses", input_file_id="input_file_id", ) as response: assert not response.is_closed diff --git a/tests/test_models.py b/tests/test_models.py index 30b17e3ac0..b9be1f3ea3 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -854,3 +854,35 @@ class Model(BaseModel): m = construct_type(value={"cls": "foo"}, type_=Model) assert isinstance(m, Model) assert isinstance(m.cls, str) + + +def test_discriminated_union_case() -> None: + class A(BaseModel): + type: Literal["a"] + + data: bool + + class B(BaseModel): + type: Literal["b"] + + data: List[Union[A, object]] + + class ModelA(BaseModel): + type: Literal["modelA"] + + data: 
int + + class ModelB(BaseModel): + type: Literal["modelB"] + + required: str + + data: Union[A, B] + + # when constructing ModelA | ModelB, value data doesn't match ModelB exactly - missing `required` + m = construct_type( + value={"type": "modelB", "data": {"type": "a", "data": True}}, + type_=cast(Any, Annotated[Union[ModelA, ModelB], PropertyInfo(discriminator="type")]), + ) + + assert isinstance(m, ModelB)
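
---

The batch changes above add `/v1/responses` to the allowed `endpoint` values, mirroring the updated generated tests. A minimal end-to-end sketch (not part of this diff) is shown below; it assumes a local `batch_requests.jsonl` file whose lines are Responses API request bodies, each wrapped with a unique `custom_id`.

```python
from openai import OpenAI

client = OpenAI()

# Hypothetical input file: one JSON object per line, each a /v1/responses
# request body with a unique `custom_id`.
input_file = client.files.create(
    file=open("batch_requests.jsonl", "rb"),
    purpose="batch",
)

batch = client.batches.create(
    completion_window="24h",
    endpoint="/v1/responses",  # newly accepted alongside /v1/chat/completions, /v1/embeddings, /v1/completions
    input_file_id=input_file.id,
)
print(batch.id, batch.status)
```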
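The updated `ChatCompletionChunk.usage` and `stream_options` docstrings warn that an interrupted or cancelled stream may never deliver the final usage chunk. A small defensive pattern, sketched here with a placeholder model name, is to treat usage as absent until a non-null value arrives:

```python
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o",  # placeholder; any chat model works
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
    stream_options={"include_usage": True},
)

usage = None
for chunk in stream:
    if chunk.choices:
        print(chunk.choices[0].delta.content or "", end="")
    # Every chunk carries a `usage` field; it is None on all but the final chunk.
    if chunk.usage is not None:
        usage = chunk.usage

if usage is not None:
    print(f"\ntotal tokens: {usage.total_tokens}")
else:
    print("\nno usage received (the stream may have been interrupted)")
```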
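`ResponseUsage` now carries an `input_tokens_details` breakdown with `cached_tokens`. A hedged sketch of reading it from a Responses API call follows; the `responses.create` call and model name are illustrative rather than taken from this diff, and `usage` is guarded because it is typed as optional.

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",  # placeholder model name
    input="Summarize the plot of Hamlet in one sentence.",
)

if response.usage is not None:
    print("input tokens:", response.usage.input_tokens)
    print("cached input tokens:", response.usage.input_tokens_details.cached_tokens)
    print("output tokens:", response.usage.output_tokens)
```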