diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8bcd8a5b4f..33a65d75c4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.76.2" + ".": "1.77.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index d92408173b..0c8278866d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 97 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml -openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03 -config_hash: b597cd9a31e9e5ec709e2eefb4c54122 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0ee6b36cf3cc278cef4199a6aec5f7d530a6c1f17a74830037e96d50ca1edc50.yml +openapi_spec_hash: e8ec5f46bc0655b34f292422d58a60f6 +config_hash: d9b6b6e6bc85744663e300eebc482067 diff --git a/CHANGELOG.md b/CHANGELOG.md index bc85128f6a..9097cdc65a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 1.77.0 (2025-05-02) + +Full Changelog: [v1.76.2...v1.77.0](https://github.com/openai/openai-python/compare/v1.76.2...v1.77.0) + +### Features + +* **api:** add image sizes, reasoning encryption ([473469a](https://github.com/openai/openai-python/commit/473469afa1a5f0a03f727bdcdadb9fd57872f9c5)) + + +### Bug Fixes + +* **parsing:** handle whitespace only strings ([#2007](https://github.com/openai/openai-python/issues/2007)) ([246bc5b](https://github.com/openai/openai-python/commit/246bc5b7559887840717667a0dad465caef66c3b)) + + +### Chores + +* only strip leading whitespace ([8467d66](https://github.com/openai/openai-python/commit/8467d666e0ddf1a9f81b8769a5c8a2fef1de20c1)) + ## 1.76.2 (2025-04-29) Full Changelog: [v1.76.1...v1.76.2](https://github.com/openai/openai-python/compare/v1.76.1...v1.76.2) diff --git a/pyproject.toml b/pyproject.toml index 2c3c3eaf3b..4b854b05e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.76.2" +version = "1.77.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index ef1e3fe526..9d8ba015e1 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.76.2" # x-release-please-version +__version__ = "1.77.0" # x-release-please-version diff --git a/src/openai/lib/streaming/chat/_completions.py b/src/openai/lib/streaming/chat/_completions.py index f147696cca..a7b70c32d3 100644 --- a/src/openai/lib/streaming/chat/_completions.py +++ b/src/openai/lib/streaming/chat/_completions.py @@ -438,6 +438,8 @@ def _accumulate_chunk(self, chunk: ChatCompletionChunk) -> ParsedChatCompletionS choice_snapshot.message.content and not choice_snapshot.message.refusal and is_given(self._rich_response_format) + # partial parsing fails on white-space + and choice_snapshot.message.content.lstrip() ): choice_snapshot.message.parsed = from_json( bytes(choice_snapshot.message.content, "utf-8"), diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index fad18dcdf5..a195d7135e 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -85,7 +85,7 @@ def create( `wav`, and `pcm`. speed: The speed of the generated audio. 
Select a value from `0.25` to `4.0`. `1.0` is - the default. + the default. Does not work with `gpt-4o-mini-tts`. extra_headers: Send extra headers @@ -176,7 +176,7 @@ async def create( `wav`, and `pcm`. speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is - the default. + the default. Does not work with `gpt-4o-mini-tts`. extra_headers: Send extra headers diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index e59d0ce35c..524bebacae 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -119,12 +119,14 @@ def edit( *, image: Union[FileTypes, List[FileTypes]], prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -139,14 +141,25 @@ def edit( This endpoint only supports `gpt-image-1` and `dall-e-2`. Args: - image: The image(s) to edit. Must be a supported image file or an array of images. For - `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - 25MB. For `dall-e-2`, you can only provide one image, and it should be a square - `png` file less than 4MB. + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 25MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. prompt: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. If there are multiple images provided, the mask will be applied on the first image. 
Must be a valid PNG file, less than @@ -187,6 +200,7 @@ def edit( { "image": image, "prompt": prompt, + "background": background, "mask": mask, "model": model, "n": n, @@ -429,12 +443,14 @@ async def edit( *, image: Union[FileTypes, List[FileTypes]], prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -449,14 +465,25 @@ async def edit( This endpoint only supports `gpt-image-1` and `dall-e-2`. Args: - image: The image(s) to edit. Must be a supported image file or an array of images. For - `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - 25MB. For `dall-e-2`, you can only provide one image, and it should be a square - `png` file less than 4MB. + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 25MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. prompt: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. If there are multiple images provided, the mask will be applied on the first image. Must be a valid PNG file, less than @@ -497,6 +524,7 @@ async def edit( { "image": image, "prompt": prompt, + "background": background, "mask": mask, "model": model, "n": n, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 4a0687f9f3..a905bc34b1 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -140,6 +140,11 @@ def create( - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. 
This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). instructions: Inserts a system (or developer) message as the first item in the model's context. @@ -331,6 +336,11 @@ def create( - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). instructions: Inserts a system (or developer) message as the first item in the model's context. @@ -515,6 +525,11 @@ def create( - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). instructions: Inserts a system (or developer) message as the first item in the model's context. @@ -1013,6 +1028,11 @@ async def create( - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). instructions: Inserts a system (or developer) message as the first item in the model's context. @@ -1204,6 +1224,11 @@ async def create( - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). instructions: Inserts a system (or developer) message as the first item in the model's context. @@ -1388,6 +1413,11 @@ async def create( - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). 
instructions: Inserts a system (or developer) message as the first item in the model's context. diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index a4fc020532..905ca5c3a8 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -48,5 +48,6 @@ class SpeechCreateParams(TypedDict, total=False): speed: float """The speed of the generated audio. - Select a value from `0.25` to `4.0`. `1.0` is the default. + Select a value from `0.25` to `4.0`. `1.0` is the default. Does not work with + `gpt-4o-mini-tts`. """ diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py index f01a12c1b0..6294e8ac19 100644 --- a/src/openai/types/image_edit_params.py +++ b/src/openai/types/image_edit_params.py @@ -13,12 +13,13 @@ class ImageEditParams(TypedDict, total=False): image: Required[Union[FileTypes, List[FileTypes]]] - """The image(s) to edit. + """The image(s) to edit. Must be a supported image file or an array of images. - Must be a supported image file or an array of images. For `gpt-image-1`, each - image should be a `png`, `webp`, or `jpg` file less than 25MB. For `dall-e-2`, - you can only provide one image, and it should be a square `png` file less than - 4MB. + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 25MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. """ prompt: Required[str] @@ -28,6 +29,17 @@ class ImageEditParams(TypedDict, total=False): `gpt-image-1`. """ + background: Optional[Literal["transparent", "opaque", "auto"]] + """Allows to set transparency for the background of the generated image(s). + + This parameter is only supported for `gpt-image-1`. Must be one of + `transparent`, `opaque` or `auto` (default value). When `auto` is used, the + model will automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + """ + mask: FileTypes """An additional image whose fully transparent areas (e.g. @@ -61,7 +73,7 @@ class ImageEditParams(TypedDict, total=False): `gpt-image-1` will always return base64-encoded images. """ - size: Optional[Literal["256x256", "512x512", "1024x1024"]] + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] """The size of the generated images. 
Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or diff --git a/src/openai/types/responses/computer_tool.py b/src/openai/types/responses/computer_tool.py index dffb7af7b7..5b844f5bf4 100644 --- a/src/openai/types/responses/computer_tool.py +++ b/src/openai/types/responses/computer_tool.py @@ -8,13 +8,13 @@ class ComputerTool(BaseModel): - display_height: float + display_height: int """The height of the computer display.""" - display_width: float + display_width: int """The width of the computer display.""" - environment: Literal["mac", "windows", "ubuntu", "browser"] + environment: Literal["windows", "mac", "linux", "ubuntu", "browser"] """The type of computer environment to control.""" type: Literal["computer_use_preview"] diff --git a/src/openai/types/responses/computer_tool_param.py b/src/openai/types/responses/computer_tool_param.py index 6b1072ffd2..06a5c132ec 100644 --- a/src/openai/types/responses/computer_tool_param.py +++ b/src/openai/types/responses/computer_tool_param.py @@ -8,13 +8,13 @@ class ComputerToolParam(TypedDict, total=False): - display_height: Required[float] + display_height: Required[int] """The height of the computer display.""" - display_width: Required[float] + display_width: Required[int] """The width of the computer display.""" - environment: Required[Literal["mac", "windows", "ubuntu", "browser"]] + environment: Required[Literal["windows", "mac", "linux", "ubuntu", "browser"]] """The type of computer environment to control.""" type: Required[Literal["computer_use_preview"]] diff --git a/src/openai/types/responses/file_search_tool.py b/src/openai/types/responses/file_search_tool.py index 683fc533fe..dbdd8cffab 100644 --- a/src/openai/types/responses/file_search_tool.py +++ b/src/openai/types/responses/file_search_tool.py @@ -9,7 +9,7 @@ __all__ = ["FileSearchTool", "Filters", "RankingOptions"] -Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter] +Filters: TypeAlias = Union[ComparisonFilter, CompoundFilter, None] class RankingOptions(BaseModel): @@ -17,10 +17,10 @@ class RankingOptions(BaseModel): """The ranker to use for the file search.""" score_threshold: Optional[float] = None - """ - The score threshold for the file search, a number between 0 and 1. Numbers - closer to 1 will attempt to return only the most relevant results, but may - return fewer results. + """The score threshold for the file search, a number between 0 and 1. + + Numbers closer to 1 will attempt to return only the most relevant results, but + may return fewer results. """ @@ -32,7 +32,7 @@ class FileSearchTool(BaseModel): """The IDs of the vector stores to search.""" filters: Optional[Filters] = None - """A filter to apply based on file attributes.""" + """A filter to apply.""" max_num_results: Optional[int] = None """The maximum number of results to return. 
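
For reference, a minimal sketch of the new image-editing options introduced above (`background` plus the `1536x1024`, `1024x1536`, and `auto` sizes). The file name and prompt are illustrative, not part of this diff; `gpt-image-1` always returns base64-encoded images, so `b64_json` is read rather than `url`:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

with open("sprite.png", "rb") as image_file:  # illustrative file name
    result = client.images.edit(
        model="gpt-image-1",
        image=image_file,
        prompt="Give the sprite a red scarf",  # illustrative prompt
        background="transparent",  # new: "transparent" | "opaque" | "auto"
        size="1024x1536",          # new portrait size; "1536x1024" is landscape
    )

# gpt-image-1 always returns base64-encoded images.
print(result.data[0].b64_json[:80])
```
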
diff --git a/src/openai/types/responses/file_search_tool_param.py b/src/openai/types/responses/file_search_tool_param.py index 2d6af8536b..2851fae460 100644 --- a/src/openai/types/responses/file_search_tool_param.py +++ b/src/openai/types/responses/file_search_tool_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union +from typing import List, Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared_params.compound_filter import CompoundFilter @@ -18,10 +18,10 @@ class RankingOptions(TypedDict, total=False): """The ranker to use for the file search.""" score_threshold: float - """ - The score threshold for the file search, a number between 0 and 1. Numbers - closer to 1 will attempt to return only the most relevant results, but may - return fewer results. + """The score threshold for the file search, a number between 0 and 1. + + Numbers closer to 1 will attempt to return only the most relevant results, but + may return fewer results. """ @@ -32,8 +32,8 @@ class FileSearchToolParam(TypedDict, total=False): vector_store_ids: Required[List[str]] """The IDs of the vector stores to search.""" - filters: Filters - """A filter to apply based on file attributes.""" + filters: Optional[Filters] + """A filter to apply.""" max_num_results: int """The maximum number of results to return. diff --git a/src/openai/types/responses/function_tool.py b/src/openai/types/responses/function_tool.py index 236a2c7c63..d881565356 100644 --- a/src/openai/types/responses/function_tool.py +++ b/src/openai/types/responses/function_tool.py @@ -12,10 +12,10 @@ class FunctionTool(BaseModel): name: str """The name of the function to call.""" - parameters: Dict[str, object] + parameters: Optional[Dict[str, object]] = None """A JSON schema object describing the parameters of the function.""" - strict: bool + strict: Optional[bool] = None """Whether to enforce strict parameter validation. Default `true`.""" type: Literal["function"] diff --git a/src/openai/types/responses/function_tool_param.py b/src/openai/types/responses/function_tool_param.py index 774a22e336..56bab36f47 100644 --- a/src/openai/types/responses/function_tool_param.py +++ b/src/openai/types/responses/function_tool_param.py @@ -12,10 +12,10 @@ class FunctionToolParam(TypedDict, total=False): name: Required[str] """The name of the function to call.""" - parameters: Required[Dict[str, object]] + parameters: Required[Optional[Dict[str, object]]] """A JSON schema object describing the parameters of the function.""" - strict: Required[bool] + strict: Required[Optional[bool]] """Whether to enforce strict parameter validation. Default `true`.""" type: Required[Literal["function"]] diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 3c0a9d7b8a..972d413926 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -56,6 +56,11 @@ class ResponseCreateParamsBase(TypedDict, total=False): - `message.input_image.image_url`: Include image urls from the input message. - `computer_call_output.output.image_url`: Include image urls from the computer call output. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. 
This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). """ instructions: Optional[str] diff --git a/src/openai/types/responses/response_includable.py b/src/openai/types/responses/response_includable.py index 83489fa7f1..a01dddd71d 100644 --- a/src/openai/types/responses/response_includable.py +++ b/src/openai/types/responses/response_includable.py @@ -5,5 +5,8 @@ __all__ = ["ResponseIncludable"] ResponseIncludable: TypeAlias = Literal[ - "file_search_call.results", "message.input_image.image_url", "computer_call_output.output.image_url" + "file_search_call.results", + "message.input_image.image_url", + "computer_call_output.output.image_url", + "reasoning.encrypted_content", ] diff --git a/src/openai/types/responses/response_input_file_param.py b/src/openai/types/responses/response_input_file_param.py index dc06a4ea2d..61ae46f0cb 100644 --- a/src/openai/types/responses/response_input_file_param.py +++ b/src/openai/types/responses/response_input_file_param.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["ResponseInputFileParam"] @@ -14,7 +15,7 @@ class ResponseInputFileParam(TypedDict, total=False): file_data: str """The content of the file to be sent to the model.""" - file_id: str + file_id: Optional[str] """The ID of the file to be sent to the model.""" filename: str diff --git a/src/openai/types/responses/response_input_image.py b/src/openai/types/responses/response_input_image.py index d719f44e9b..f2d760b25e 100644 --- a/src/openai/types/responses/response_input_image.py +++ b/src/openai/types/responses/response_input_image.py @@ -9,7 +9,7 @@ class ResponseInputImage(BaseModel): - detail: Literal["high", "low", "auto"] + detail: Literal["low", "high", "auto"] """The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`. diff --git a/src/openai/types/responses/response_input_image_param.py b/src/openai/types/responses/response_input_image_param.py index 5dd4db2b5d..bc17e4f1c2 100644 --- a/src/openai/types/responses/response_input_image_param.py +++ b/src/openai/types/responses/response_input_image_param.py @@ -9,7 +9,7 @@ class ResponseInputImageParam(TypedDict, total=False): - detail: Required[Literal["high", "low", "auto"]] + detail: Required[Literal["low", "high", "auto"]] """The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`. 
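
A sketch of how the new `reasoning.encrypted_content` includable can be used to run reasoning models statelessly across turns; the model name and prompts are illustrative, not prescribed by this diff:

```python
from openai import OpenAI

client = OpenAI()

first = client.responses.create(
    model="o4-mini",  # any reasoning model; illustrative choice
    input="Work out 17 * 23 step by step.",
    store=False,  # stateless: the response is not persisted server-side
    include=["reasoning.encrypted_content"],  # new includable in this release
)

# The encrypted reasoning items come back as part of `first.output` and can be
# fed into the next turn, so the model can reuse its prior reasoning without
# server-side storage. The client serializes SDK models and plain dicts alike.
follow_up = client.responses.create(
    model="o4-mini",
    input=first.output + [{"role": "user", "content": "Now double the result."}],
    store=False,
    include=["reasoning.encrypted_content"],
)
print(follow_up.output_text)
```
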
diff --git a/src/openai/types/responses/response_input_item_param.py b/src/openai/types/responses/response_input_item_param.py index 2505f7c0b5..290953a0ef 100644 --- a/src/openai/types/responses/response_input_item_param.py +++ b/src/openai/types/responses/response_input_item_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Iterable +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from .easy_input_message_param import EasyInputMessageParam @@ -50,10 +50,10 @@ class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): id: Required[str] """The ID of the pending safety check.""" - code: Required[str] + code: Optional[str] """The type of the pending safety check.""" - message: Required[str] + message: Optional[str] """Details about the pending safety check.""" @@ -67,16 +67,16 @@ class ComputerCallOutput(TypedDict, total=False): type: Required[Literal["computer_call_output"]] """The type of the computer tool call output. Always `computer_call_output`.""" - id: str + id: Optional[str] """The ID of the computer tool call output.""" - acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck] + acknowledged_safety_checks: Optional[Iterable[ComputerCallOutputAcknowledgedSafetyCheck]] """ The safety checks reported by the API that have been acknowledged by the developer. """ - status: Literal["in_progress", "completed", "incomplete"] + status: Optional[Literal["in_progress", "completed", "incomplete"]] """The status of the message input. One of `in_progress`, `completed`, or `incomplete`. Populated when input items @@ -94,13 +94,13 @@ class FunctionCallOutput(TypedDict, total=False): type: Required[Literal["function_call_output"]] """The type of the function tool call output. Always `function_call_output`.""" - id: str + id: Optional[str] """The unique ID of the function tool call output. Populated when this item is returned via API. """ - status: Literal["in_progress", "completed", "incomplete"] + status: Optional[Literal["in_progress", "completed", "incomplete"]] """The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are @@ -112,7 +112,7 @@ class ItemReference(TypedDict, total=False): id: Required[str] """The ID of the item to reference.""" - type: Required[Literal["item_reference"]] + type: Optional[Literal["item_reference"]] """The type of item to reference. Always `item_reference`.""" diff --git a/src/openai/types/responses/response_input_param.py b/src/openai/types/responses/response_input_param.py index 84a80eb7c2..b24182697a 100644 --- a/src/openai/types/responses/response_input_param.py +++ b/src/openai/types/responses/response_input_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Iterable +from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from .easy_input_message_param import EasyInputMessageParam @@ -51,10 +51,10 @@ class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): id: Required[str] """The ID of the pending safety check.""" - code: Required[str] + code: Optional[str] """The type of the pending safety check.""" - message: Required[str] + message: Optional[str] """Details about the pending safety check.""" @@ -68,16 +68,16 @@ class ComputerCallOutput(TypedDict, total=False): type: Required[Literal["computer_call_output"]] """The type of the computer tool call output. 
Always `computer_call_output`.""" - id: str + id: Optional[str] """The ID of the computer tool call output.""" - acknowledged_safety_checks: Iterable[ComputerCallOutputAcknowledgedSafetyCheck] + acknowledged_safety_checks: Optional[Iterable[ComputerCallOutputAcknowledgedSafetyCheck]] """ The safety checks reported by the API that have been acknowledged by the developer. """ - status: Literal["in_progress", "completed", "incomplete"] + status: Optional[Literal["in_progress", "completed", "incomplete"]] """The status of the message input. One of `in_progress`, `completed`, or `incomplete`. Populated when input items @@ -95,13 +95,13 @@ class FunctionCallOutput(TypedDict, total=False): type: Required[Literal["function_call_output"]] """The type of the function tool call output. Always `function_call_output`.""" - id: str + id: Optional[str] """The unique ID of the function tool call output. Populated when this item is returned via API. """ - status: Literal["in_progress", "completed", "incomplete"] + status: Optional[Literal["in_progress", "completed", "incomplete"]] """The status of the item. One of `in_progress`, `completed`, or `incomplete`. Populated when items are @@ -113,7 +113,7 @@ class ItemReference(TypedDict, total=False): id: Required[str] """The ID of the item to reference.""" - type: Required[Literal["item_reference"]] + type: Optional[Literal["item_reference"]] """The type of item to reference. Always `item_reference`.""" diff --git a/src/openai/types/responses/response_reasoning_item.py b/src/openai/types/responses/response_reasoning_item.py index 57e5fbfe6d..f5da7802f8 100644 --- a/src/openai/types/responses/response_reasoning_item.py +++ b/src/openai/types/responses/response_reasoning_item.py @@ -28,6 +28,12 @@ class ResponseReasoningItem(BaseModel): type: Literal["reasoning"] """The type of the object. Always `reasoning`.""" + encrypted_content: Optional[str] = None + """ + The encrypted content of the reasoning item - populated when a response is + generated with `reasoning.encrypted_content` in the `include` parameter. + """ + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None """The status of the item. diff --git a/src/openai/types/responses/response_reasoning_item_param.py b/src/openai/types/responses/response_reasoning_item_param.py index adb49d6402..2cfa5312ed 100644 --- a/src/openai/types/responses/response_reasoning_item_param.py +++ b/src/openai/types/responses/response_reasoning_item_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Iterable +from typing import Iterable, Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["ResponseReasoningItemParam", "Summary"] @@ -28,6 +28,12 @@ class ResponseReasoningItemParam(TypedDict, total=False): type: Required[Literal["reasoning"]] """The type of the object. Always `reasoning`.""" + encrypted_content: Optional[str] + """ + The encrypted content of the reasoning item - populated when a response is + generated with `reasoning.encrypted_content` in the `include` parameter. + """ + status: Literal["in_progress", "completed", "incomplete"] """The status of the item. 
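
The tool typings also shifted in this release: computer display dimensions are now `int` rather than `float`, `linux` joins the environment literals, and a function tool's `parameters` and `strict` fields accept `None`. A small sketch of params that type-check against the updated definitions (the tool names are made up):

```python
from openai.types.responses import ComputerToolParam, FunctionToolParam

computer_tool: ComputerToolParam = {
    "type": "computer_use_preview",
    "display_width": 1280,   # int now; 1280.0 no longer matches the type
    "display_height": 800,
    "environment": "linux",  # newly accepted alongside windows/mac/ubuntu/browser
}

ping_tool: FunctionToolParam = {
    "type": "function",
    "name": "ping",      # hypothetical parameter-less function
    "parameters": None,  # Optional as of this release
    "strict": None,
}
```
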
diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index de5d5524d4..d96abdbe5a 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -12,5 +12,5 @@ __all__ = ["Tool"] Tool: TypeAlias = Annotated[ - Union[FileSearchTool, FunctionTool, ComputerTool, WebSearchTool], PropertyInfo(discriminator="type") + Union[FileSearchTool, FunctionTool, WebSearchTool, ComputerTool], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index be1cf82452..200c347005 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -13,6 +13,6 @@ __all__ = ["ToolParam"] -ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, ComputerToolParam, WebSearchToolParam] +ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, WebSearchToolParam, ComputerToolParam] ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam] diff --git a/src/openai/types/responses/web_search_tool.py b/src/openai/types/responses/web_search_tool.py index bee270bf85..a6bf951145 100644 --- a/src/openai/types/responses/web_search_tool.py +++ b/src/openai/types/responses/web_search_tool.py @@ -33,16 +33,17 @@ class UserLocation(BaseModel): class WebSearchTool(BaseModel): type: Literal["web_search_preview", "web_search_preview_2025_03_11"] - """The type of the web search tool. One of: + """The type of the web search tool. - - `web_search_preview` - - `web_search_preview_2025_03_11` + One of `web_search_preview` or `web_search_preview_2025_03_11`. """ search_context_size: Optional[Literal["low", "medium", "high"]] = None - """ - High level guidance for the amount of context window space to use for the - search. One of `low`, `medium`, or `high`. `medium` is the default. + """High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. """ user_location: Optional[UserLocation] = None + """The user's location.""" diff --git a/src/openai/types/responses/web_search_tool_param.py b/src/openai/types/responses/web_search_tool_param.py index 8ee36ffb47..d0335c01a3 100644 --- a/src/openai/types/responses/web_search_tool_param.py +++ b/src/openai/types/responses/web_search_tool_param.py @@ -12,19 +12,19 @@ class UserLocation(TypedDict, total=False): type: Required[Literal["approximate"]] """The type of location approximation. Always `approximate`.""" - city: str + city: Optional[str] """Free text input for the city of the user, e.g. `San Francisco`.""" - country: str + country: Optional[str] """ The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`. """ - region: str + region: Optional[str] """Free text input for the region of the user, e.g. `California`.""" - timezone: str + timezone: Optional[str] """ The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. `America/Los_Angeles`. @@ -33,16 +33,17 @@ class UserLocation(TypedDict, total=False): class WebSearchToolParam(TypedDict, total=False): type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] - """The type of the web search tool. One of: + """The type of the web search tool. - - `web_search_preview` - - `web_search_preview_2025_03_11` + One of `web_search_preview` or `web_search_preview_2025_03_11`. 
""" search_context_size: Literal["low", "medium", "high"] - """ - High level guidance for the amount of context window space to use for the - search. One of `low`, `medium`, or `high`. `medium` is the default. + """High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. """ user_location: Optional[UserLocation] + """The user's location.""" diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py index 7997e9f5a1..7c61453bc1 100644 --- a/tests/api_resources/test_images.py +++ b/tests/api_resources/test_images.py @@ -73,6 +73,7 @@ def test_method_edit_with_all_params(self, client: OpenAI) -> None: image = client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", + background="transparent", mask=b"raw file contents", model="string", n=1, @@ -218,6 +219,7 @@ async def test_method_edit_with_all_params(self, async_client: AsyncOpenAI) -> N image = await async_client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", + background="transparent", mask=b"raw file contents", model="string", n=1,